/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

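// Note: The stubs below share a common call pattern: the Runtime1 entry point
// is materialized TOC-relative into R0 (add_const_optimized from R29_TOC)
// rather than as a full 64-bit constant (see the commented-out
// load_const_optimized alternatives), then called via mtctr/bctrl.
// add_call_info_here/verify_oop_map record the oop map at the call site, and
// debug_only(__ illtrap()) guards paths that are not expected to return here.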
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0;
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(index, -16, R1_SP);

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


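// ImplicitNullCheckStub: the offset of the faulting instruction is recorded in
// the implicit exception table so that the trap/signal handler can presumably
// dispatch execution to this stub. If deoptimize_on_exception() is set, we
// deoptimize instead of throwing (see the comment inside).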
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


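// Note on the array allocation stubs below: only the (already sign-extended)
// length needs to be moved into R5_ARG3 here; the klass register is assumed to
// have been set up by the allocating LIR op before branching to the slow path.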
// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
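// Note: -_patch_info_offset must match the size of the call sequence emitted at
// the "patch entry point" below (load_const32, presumably lis/ori, plus add,
// mtctr and bctrl, i.e. five instructions); PatchingStub::emit_code asserts this.
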
void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
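  // The record consists of a padding byte followed by the distance back to
  // being_initialized_entry, the number of bytes to skip over this stub code,
  // and the number of copied instruction bytes; it is presumably read back by
  // the patching runtime to locate the copied code and the continuation.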
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  //__ load_const(R0, target); + mtctr + bctrl must have size -_patch_info_offset
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}

#undef __