/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
#undef CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
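  // (Z_R1 is one of the s390 scratch registers that the register allocator
  // never hands out, so clobbering it here cannot disturb any value that
  // linear scan keeps live across this stub.)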
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
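    // (Deoptimizing lets the interpreter raise the exception with the
    // correct bytecode state instead of raising it from compiled code.)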
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
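  // Pick the stub variant: the _nofpu flavor skips saving and restoring the
  // floating point registers in the runtime stub and may only be used when
  // the compiled method contains no FPU code.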
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime.
// - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
// - in runtime: After initializing class, restore original code, reexecute instruction.

int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
    case access_field_id:  bc = "patch site (access_field)";  break;
    case load_klass_id:    bc = "patch site (load_klass)";    break;
    case load_mirror_id:   bc = "patch site (load_mirror)";   break;
    case load_appendix_id: bc = "patch site (load_appendix)"; break;
    default:               bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
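    // As in the load_klass case above, the instruction emitted here must be
    // byte-for-byte identical to the code at _pc_start; the ASSERT block
    // below verifies that.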
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id);   reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id);  reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
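  // load_const always emits its full 12-byte sequence, and BASR adds 2 bytes,
  // together matching the fixed _patch_info_offset of -(12 + 2) defined
  // above; the assert after the BASR checks that this invariant holds.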
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, _continuation);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
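  // Storing a null new value can never create a cross-region reference, so
  // the null check below exits to _continuation without dirtying a card.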
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, _continuation);
  __ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#endif // INCLUDE_ALL_GCS

#undef __