/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
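
// Convention used throughout this file: the Runtime1 entry points called
// below take their first argument in G4 and their second in G5.  SPARC
// executes one instruction in the delay slot of every call and branch, so
// each call/branch here is followed either by delayed()->nop() or by a
// delayed() move that doubles as argument or result setup.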

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ mov(_array->as_pointer_register(), G5);
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  Metadata* m = _method->as_constant_ptr()->as_metadata();
  __ set_metadata_constant(m, G5);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
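

// The allocation stubs below (NewInstance, NewTypeArray, NewObjectArray)
// share one pattern: the array length, when there is one, goes in G4, the
// klass is moved into G5 in the call's delay slot, and the runtime returns
// the new object in O0, which the return branch's delay slot copies into
// the LIR result register.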

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
//   - call to stub, jump to runtime
//   - in runtime: preserve all registers (especially objects, i.e., source and destination object)
//   - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // patch sites on sparc are always properly aligned.
}
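
// Rough layout of the stub emitted below.  The NativeGeneralJump planted at
// _pc_start sends execution to call_patch until the runtime has restored
// the original code:
//
//   being_initialized_entry:
//     <copy of the patched code>     (_bytes_to_copy bytes)
//     <being-initialized check>      (load_mirror_id only: branches back
//                                     into the nmethod when the current
//                                     thread is the class initializer)
//     <patch record>                 (4 bytes, see below)
//   call_patch:
//     call <Runtime1 patching entry>
//     ba   _patch_site_entry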

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
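  // Record layout, located by the runtime at a fixed offset from the call
  // emitted at call_patch below:
  //   byte 0: 0 (alignment padding)
  //   byte 1: offset from the end of this record back to being_initialized_entry
  //   byte 2: bytes_to_skip -- distance from end_of_patch to the end of this record
  //   byte 3: _bytes_to_copy -- size of the code copied out at the top of the stub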
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_trap_request, G4);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
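
// br_null has only a 16-bit word displacement, so both G1 stubs below fall
// back to an explicit cmp + brx (which reaches further) whenever the
// continuation label is out of wdisp16 range.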

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(pre_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();

  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(new_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __