/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // We don't expect any arg reg save area, so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rbp_off = 0,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

// FIXME -- this is used by C1
class RegisterSaver {
 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int r0_offset_in_bytes(void)         { return (32 + r0->encoding()) * wordSize; }
  static int reg_offset_in_bytes(Register r)  { return r0_offset_in_bytes() + r->encoding() * wordSize; }
  static int rmethod_offset_in_bytes(void)    { return reg_offset_in_bytes(rmethod); }
  static int rscratch1_offset_in_bytes(void)  { return (32 + rscratch1->encoding()) * wordSize; }
  static int v0_offset_in_bytes(void)         { return 0; }
  static int return_offset_in_bytes(void)     { return (32 /* floats */ + 31 /* gregs */) * wordSize; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);

  // Capture info about frame layout
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
    // The frame sender code expects that rfp will be in
    // the "natural" place and will override any oopMap
    // setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    r0_off = fpu_state_off + FPUStateSizeInWords,
    rfp_off = r0_off + (RegisterImpl::number_of_registers - 2) * RegisterImpl::max_slots_per_register,
    return_off = rfp_off + RegisterImpl::max_slots_per_register,      // slot for return address
    reg_save_size = return_off + RegisterImpl::max_slots_per_register};

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    // Save upper half of vector registers
    int vect_words = FloatRegisterImpl::number_of_registers * FloatRegisterImpl::extra_save_slots_per_neon_register /
                     VMRegImpl::slots_per_word;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(save_vectors);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
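
  // Illustrative arithmetic (not asserted here): stack slots are 4 bytes,
  // so each 64-bit general register occupies RegisterImpl::max_slots_per_register
  // (= 2) slots, and the 32 float registers are saved below the general
  // registers at FloatRegisterImpl::save_slots_per_register (= 2) slots each
  // by default. In the loops below, general register i therefore lands at
  // slot 2 * i + 2 * 32; e.g. r1 maps to slot 66 of the save area.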

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
    Register r = as_Register(i);
    if (r <= rfp && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
      int sp_offset = RegisterImpl::max_slots_per_register * i +
                      FloatRegisterImpl::save_slots_per_register * FloatRegisterImpl::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = save_vectors ? (FloatRegisterImpl::max_slots_per_register * i) :
                                   (FloatRegisterImpl::save_slots_per_register * i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#if !COMPILER2_OR_JVMCI
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(restore_vectors);
  __ leave();
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ ldrd(v0, Address(sp, v0_offset_in_bytes()));
  // Restore integer result register
  __ ldr(r0, Address(sp, r0_offset_in_bytes()));

  // Pop all of the register save area off the stack
  __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 8-byte vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ mov(rscratch1, destination);
  __ br(rscratch1);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
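
// For example (illustrative only): incoming stack slot 0 of the caller's
// frame is addressed as rfp + 4 * stack_slot_size = rfp + 16, i.e. just
// above the two saved 64-bit words for rfp and lr.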

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}
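
// A worked example (illustrative, not asserted by the code): for an
// instance method taking (int, long, double), the flattened signature is
//   [T_OBJECT(this), T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID]
// and the loop above assigns
//   this -> j_rarg0, int -> j_rarg1, long -> j_rarg2, double -> j_farg0;
// the T_VOID halves get set_bad(), and the return value is 0 because no
// stack slots were needed.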

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double in
    // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // jlong/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    //
    //
    //
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We  finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.
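
  // Illustrative example (not asserted by the code): for a static native
  // method whose flattened C signature is
  //   [T_ADDRESS /* JNIEnv* */, T_OBJECT /* mirror */, T_INT, T_DOUBLE, T_VOID]
  // the loop below assigns c_rarg0, c_rarg1, c_rarg2 and c_farg0, and
  // returns 0. Any argument that overflows the eight registers consumes
  // two 4-byte stack slots, even a 32-bit value.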

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (SPARC ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmp(rscratch1, zr);
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmp(rOop, zr);
    __ lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
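
// Illustrative example (not asserted by the code): an oop arriving in
// j_rarg2 is spilled to sp + (2 * slots_per_word + oop_handle_offset) * 4,
// that slot is recorded in the OopMap so GC can update it across the call,
// and the C callee receives either the address of the slot or NULL --
// never a raw oop.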

// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldrw(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ strw(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}
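
// Illustrative note (behavior follows from the code above): each float
// register argument is spilled to its own 16-byte pre-decremented slot and
// the general registers are then pushed as a set, so sp stays 16-byte
// aligned throughout; restore_args unwinds in the exact reverse order.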

// Check GCLocker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) { Unimplemented(); }

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }


class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair        _src;
    VMRegPair        _dst;
    int              _src_index;
    int              _dst_index;
    bool             _processed;
    MoveOperation*   _next;
    MoveOperation*   _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _dst(dst)
    , _src_index(src_index)
    , _dst_index(dst_index)
    , _processed(false)
    , _next(NULL)
    , _prev(NULL) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _src; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collected all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};


static void rt_call(MacroAssembler* masm, address dest) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ lea(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
    __ maybe_isb();
  }
}
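
// Note (descriptive, following the code above): when the destination is a
// blob in the code cache, far_call can reach it with a pc-relative branch;
// for an arbitrary C address the full 64-bit target must instead be
// materialized in rscratch1 and called through the register.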

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        ss.skip_array_prefix(1);  // skip one '['
        assert(ss.is_primitive(), "primitive type expected");
        in_elem_bt[i] = ss.type();
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type() ||
               in_sig_bt[i] == T_ARRAY, "must match");
        ss.next();
      }
    }
  }
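
  // Illustrative example (not asserted by the code): for a regular static
  // native int m(String s), in_sig_bt is [T_OBJECT] and out_sig_bt becomes
  //   [T_ADDRESS /* JNIEnv* */, T_OBJECT /* class mirror */, T_OBJECT /* s */];
  // for a critical native taking an int[], the array is rewritten as the
  // (length, body pointer) pair [T_INT, T_ADDRESS] and no JNIEnv*/mirror
  // arguments are prepended.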

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // incoming registers

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_ARRAY:  // specific to LP64 (7145024)
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = align_up(stack_slots, 2);
    }
  }

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 4 for return address (which we own) and saved rfp
  stack_slots += 6;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (8 java arg registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = align_up(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
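
  // Illustrative arithmetic (not asserted here): stack slots are 4 bytes
  // and VMRegImpl::slots_per_word is 2, so the default oopHandle area for
  // the 8 Java argument registers is 16 slots (64 bytes), a klass or lock
  // slot adds 2 slots (8 bytes) each, and the final align_up keeps the
  // frame a multiple of the 16-byte AArch64 stack alignment.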

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rfp. rfp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rscratch2;
  const Register receiver = j_rarg0;

  Label hit;
  Label exception_pending;

  assert_different_registers(ic_reg, receiver, rscratch1);
  __ verify_oop(receiver);
  __ cmp_klass(receiver, ic_reg, rscratch1);
  __ br(Assembler::EQ, hit);

  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // Verified entry point must be aligned
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump.  For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC.  Make it a NOP.
  __ nop();

  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
    Label L_skip_barrier;
    __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
  }

  // Generate stack overflow check
  if (UseStackBanging) {
    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
  } else {
    Unimplemented();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rfp
  __ sub(sp, sp, stack_size - 2*wordSize);

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(masm);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // We use r20 as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = r20;

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
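  // For instance (illustrative only): if this wrapper's frame needs 40
  // slots, the OopMap below is sized for 80, so an incoming oop still in
  // the caller's outgoing-args area can be described at
  // offset_in_older_frame + framesize_in_slots (see object_move above)
  // without falling off the end of the map.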
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rfp (someday)
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));


  int float_args = 0;
  int int_args = 0;

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // This may iterate in two different directions depending on the
  // kind of native it is.  The reason is that for regular JNI natives
  // the incoming and outgoing registers are offset upwards and for
  // critical natives they are offset down.
  GrowableArray<int> arg_order(2 * total_in_args);
  VMRegPair tmp_vmreg;
  tmp_vmreg.set2(r19->as_VMReg());

  if (!is_critical_native) {
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      arg_order.push(i);
      arg_order.push(c_arg);
    }
  } else {
    // Compute a valid move order, using tmp_vmreg to break any cycles
    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
  }

  int temploc = -1;
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    if (c_arg == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // This arg needs to be moved to a temporary
      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
      in_regs[i] = tmp_vmreg;
      temploc = i;
      continue;
    } else if (i == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // Read from the temporary location
      assert(temploc != -1, "must be valid");
      i = temploc;
      temploc = -1;
    }
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
#ifdef ASSERT
          if (out_regs[c_arg].first()->is_Register()) {
            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
          } else if (out_regs[c_arg].first()->is_FloatRegister()) {
            freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
          }
#endif
          int_args++;
          break;
        }
        // fall through for regular JNI natives
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        int_args++;
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        int_args++;
        break;

      case T_ADDRESS:
        assert(false, "found T_ADDRESS in java args");
        // fall through
      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
        int_args++;
    }
  }

  // point c_arg at the first arg that is already loaded in case we
  // need to spill before we call out
  int c_arg = total_c_args - total_in_args;

  // Pre-load a static method's oop into c_rarg1.
  if (method->is_static() && !is_critical_native) {

    // load oop into a register
    __ movoop(c_rarg1,
              JNIHandles::make_local(method->method_holder()->java_mirror()),
              /*immediate*/true);

    // Now handlize the static class mirror; it's known not-null.
    __ str(c_rarg1, Address(sp, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(c_rarg1, Address(sp, klass_offset));
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal).
  // We use the same pc/oopMap repeatedly when we call out

  Label native_return;
  __ set_last_Java_frame(sp, noreg, native_return, rscratch1);

  Label dtrace_method_entry, dtrace_method_entry_done;
  {
    uint64_t offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_entry);
    __ bind(dtrace_method_entry_done);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // Lock a synchronized method

  // Register definitions used by locking and unlocking

  const Register swap_reg = r0;
  const Register obj_reg  = r19;  // Will contain the oop
  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
  const Register old_hdr  = r13;  // value of old header at unlock time
  const Register tmp = lr;

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    __ resolve(IS_NOT_NULL, obj_reg);

    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
    }

    // Load (object->mark() | 1) into swap_reg %r0
    __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ str(swap_reg, Address(lock_reg, mark_word_offset));

    // src -> dest iff dest == r0 else r0 <- dest
    { Label here;
      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
    }

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) sp <= mark < mark + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - sp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg

    __ sub(swap_reg, sp, swap_reg);
    __ neg(swap_reg, swap_reg);
    __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());

    // Save the test result; for the recursive case, the result is zero
    __ str(swap_reg, Address(lock_reg, mark_word_offset));
    __ br(Assembler::NE, slow_path_lock);

    // Slow path will re-enter here

    __ bind(lock_done);
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
  }

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  rt_call(masm, native_func);

  __ bind(native_return);

  intptr_t return_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(return_pc - start, map);

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(r0);            break;
  case T_CHAR   : __ ubfx(r0, r0, 0, 16);   break;
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);    break;
  case T_SHORT  : __ sbfx(r0, r0, 0, 16);   break;
  case T_INT    : __ sbfx(r0, r0, 0, 32);   break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in v0; we'll save it as needed
    break;
  case T_ARRAY  :                 // Really a handle
  case T_OBJECT :                 // Really a handle
    break;                        // can't de-handlize until after safepoint check
  case T_VOID   : break;
  case T_LONG   : break;
  default       : ShouldNotReachHere();
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
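
  // Hence the transition protocol below: publish _thread_in_native_trans,
  // force the store out with a full barrier, and only then test the
  // safepoint/suspend state.  Roughly (a sketch):
  //
  //   thread->set_thread_state(_thread_in_native_trans);   // strw below
  //   OrderAccess::fence();                                // dmb ISH below
  //   if (safepoint_pending() || thread->suspend_flags() != 0)
  //     goto safepoint_in_progress;                        // slow path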
  __ mov(rscratch1, _thread_in_native_trans);

  __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ dmb(Assembler::ISH);

  if (UseSVE > 0) {
    // Make sure that jni code does not change SVE vector length.
    __ verify_sve_vector_length();
  }

  // check for safepoint operation in progress and/or pending suspend requests
  Label safepoint_in_progress, safepoint_in_progress_done;
  {
    __ safepoint_poll_acquire(safepoint_in_progress);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
  }

  // change thread state
  Label after_transition;
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // The native result, if any, is live now

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    __ resolve(IS_NOT_NULL, obj_reg);

    Label done;

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, old_hdr, done);
    }

    // Simple recursive lock?

    __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ cbz(rscratch1, done);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }


    // get address of the stack lock
    __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    // get old displaced header
    __ ldr(old_hdr, Address(r0, 0));

    // Atomic swap old header if oop still contains the stack lock
    Label succeed;
    __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
    __ bind(succeed);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  {
    uint64_t offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  __ reset_last_Java_frame(false);

  // Unbox oop result, e.g. JNIHandles::resolve result.
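  // Conceptually (a simplified sketch of what resolve_jobject does for a
  // local, non-weak handle; the real helper also copes with NULL and
  // jweak-tagged handles):
  //
  //   r0 = (r0 == NULL) ? NULL : *(oop*)r0;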
  if (is_reference_type(ret_type)) {
    __ resolve_jobject(r0, rthread, rscratch2);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  if (!is_critical_native) {
    // reset handle block
    __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
    __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
  }

  __ leave();

  if (!is_critical_native) {
    // Any exception pending?
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ cbnz(rscratch1, exception_pending);
  }

  // We're done
  __ ret(lr);

  // Unexpected paths are out of line and go here

  if (!is_critical_native) {
    // forward the exception
    __ bind(exception_pending);

    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    __ block_comment("Slow path lock {");
    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)

    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, rthread);

    // Not a leaf but we have last_Java_frame setup as we want
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ b(lock_done);

    __ block_comment("} Slow path lock");

    __ block_comment("Slow path unlock {");
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result we must save it now as the
    // float result register (v0) is still exposed.

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      save_native_result(masm, ret_type, stack_slots);
    }

    __ mov(c_rarg2, rthread);
    __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ mov(c_rarg0, obj_reg);

    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
    // NOTE that obj_reg == r19 currently
    __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));

#ifdef ASSERT
    {
      Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ b(unlock_done);

    __ block_comment("} Slow path unlock");

  } // synchronized

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, stack_slots);
  // and continue
  __ b(reguard_done);

  // SLOW PATH safepoint
  {
    __ block_comment("safepoint {");
    __ bind(safepoint_in_progress);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, rthread);
#ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
    if (!is_critical_native) {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ blr(rscratch1);
    __ maybe_isb();
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic above.
      __ b(after_transition);
    }

    __ b(safepoint_in_progress_done);
    __ block_comment("} safepoint");
  }

  // SLOW PATH dtrace support
  {
    __ block_comment("dtrace entry {");
    __ bind(dtrace_method_entry);

    // We have all of the arguments set up at this point.  We must not
    // clobber any of the outgoing register arguments here (what if we
    // need to save/restore them and there are no oops?).
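    //
    // save_args/restore_args therefore spill the outgoing argument
    // registers into the out-arg area and reload them around the leaf
    // call, roughly (a sketch):
    //
    //   save_args(masm, total_c_args, c_arg, out_regs);    // str each live c_rargN / vN
    //   __ call_VM_leaf(..., rthread, c_rarg1);
    //   restore_args(masm, total_c_args, c_arg, out_regs); // ldr them back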

    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
    __ b(dtrace_method_entry_done);
    __ block_comment("} dtrace entry");
  }

  {
    __ block_comment("dtrace exit {");
    __ bind(dtrace_method_exit);
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      rthread, c_rarg1);
    restore_native_result(masm, ret_type, stack_slots);
    __ b(dtrace_method_exit_done);
    __ block_comment("} dtrace exit");
  }


  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}

// this function returns the adjusted size (in number of words) of a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                     // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  // diff is counted in stack words
  return align_up(diff, 2);
}


//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  int pad = 0;
#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    pad += 512; // Increase the buffer size when compiling for JVMCI
  }
#endif
  CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = NULL;
  OopMapSet *oop_maps = new OopMapSet();

  // -------------
  // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
  // registers.
  // If we are doing a normal deopt then we were called from the patched
  // nmethod from the point we returned to the nmethod.  So the return
  // address on the stack is wrong by NativeCall::instruction_size.
  // We will adjust the value so it looks like we have the original return
  // address on the stack (like when we eagerly deoptimized).
  // In the case of an exception pending when deoptimizing, we enter
  // with a return address on the stack that points after the call we patched
  // into the exception handler.  We have the following register state from,
  // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
  // r0: exception oop
  // r19: exception handler
  // r3: throwing pc
  // So in this case we simply jam r3 into the useless return address and
  // the stack looks just like we want.
  //
  // At this point we need to de-opt.  We save the argument return
  // registers.  We call the first C routine, fetch_unroll_info().  This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state.  We pop the current frame, then
  // push all the new frames.  Then we call the C routine unpack_frames() to
  // populate these frames.  Finally unpack_frames() returns us the new target
  // address.  Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for non exception case!

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
  __ b(cont);

  int reexecute_offset = __ pc() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif

  // Reexecute case
  // The return address is the pc that describes what bci to re-execute at.

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
  __ b(cont);

#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI || UseAOT) {
    implicit_exception_uncommon_trap_offset = __ pc() - start;

    __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));

    uncommon_trap_offset = __ pc() - start;

    // Save everything in sight.
    RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
    // fetch_unroll_info needs to call last_java_frame()
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
    __ movw(rscratch1, -1);
    __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));

    __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
    __ mov(c_rarg0, rthread);
    __ movw(c_rarg2, rcpool); // exec mode
    __ lea(rscratch1,
           RuntimeAddress(CAST_FROM_FN_PTR(address,
                                           Deoptimization::uncommon_trap)));
    __ blr(rscratch1);
    __ bind(retaddr);
    oop_maps->add_gc_map( __ pc()-start, map->deep_copy());

    __ reset_last_Java_frame(false);

    __ b(after_fetch_unroll_info_call);
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for r0, and
  // r3 which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.

  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // The return address pushed by save_live_registers will be patched
  // later with the throwing pc. The correct value is not available
  // now because loading it from memory would destroy registers.

  // NB: The SP at this point must be the SP of the method that is
  // being deoptimized.  Deoptimization assumes that the frame created
  // here by save_live_registers is immediately below the method's SP.
  // This is a somewhat fragile mechanism.

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // Deopt during an exception.  Save exec mode for unpack_frames.
  __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread

  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r3, Address(rfp, wordSize));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ verify_oop(r0);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  //
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)

  // fetch_unroll_info needs to call last_java_frame().

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
#ifdef ASSERT0
  { Label L;
    __ ldr(rscratch1, Address(rthread,
                              JavaThread::last_Java_fp_offset()));
    __ cbz(rscratch1, L);
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
    __ bind(L);
  }
#endif // ASSERT
  __ mov(c_rarg0, rthread);
  __ mov(c_rarg1, rcpool);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.
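  //
  // fetch_unroll_info hands back an UnrollBlock describing the
  // replacement frames.  The fields used below are, roughly (a sketch;
  // see deoptimization.hpp for the real layout):
  //
  //   struct UnrollBlock {
  //     int       size_of_deoptimized_frame;
  //     int       caller_adjustment;
  //     int       number_of_frames;
  //     intptr_t* frame_sizes;    // one entry per replacement frame
  //     address*  frame_pcs;      // likewise
  //     int       unpack_kind;
  //     ...
  //   };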
  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false);

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif

  // Load UnrollBlock* into r5
  __ mov(r5, r0);

  __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
  Label noException;
  __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::NE, noException);
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // QQQ this is useless it was NULL above
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ verify_oop(r0);

  // Overwrite the result registers with the exception results.
  __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
  // I think this is useless
  // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  __ bind(noException);

  // Only register save data is on the stack.
  // Now restore the result registers.  Everything else is either dead
  // or captured in the vframeArray.
  RegisterSaver::restore_result_registers(masm);

  // All of the register save area has been popped off the stack. Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  __ sub(r2, r2, 2 * wordSize);
  __ add(sp, sp, r2);
  __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
  // LR should now be the return address to the caller (3)

#ifdef ASSERT
  // Compilers generate code that bang the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  if (UseStackBanging) {
    __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(r19, r2);
  }
#endif
  // Load address of array of frame pcs into r2
  __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Trash the old pc
  // __ addptr(sp, wordSize);  FIXME ????

  // Load address of array of frame sizes into r4
  __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  // Load counter into r3
  __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.
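  //
  // The loop below then materializes one skeletal interpreter frame per
  // entry in the UnrollBlock arrays.  In pseudo-C (a sketch):
  //
  //   for (n = number_of_frames; n != 0; n--) {
  //     size = *frame_sizes++;
  //     lr   = *frame_pcs++;
  //     push(lr); push(rfp); rfp = sp;            // enter()
  //     sp -= size - 2*wordSize;
  //     rfp[interpreter_frame_sender_sp_offset] = sender_sp;
  //     sender_sp = sp;
  //   }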

  const Register sender_sp = r6;

  __ mov(sender_sp, sp);
  __ ldrw(r19, Address(r5,
                       Deoptimization::UnrollBlock::
                       caller_adjustment_offset_in_bytes()));
  __ sub(sp, sp, r19);

  // Push interpreter frames in a loop
  __ mov(rscratch1, (address)0xDEADDEAD);        // Make a recognizable pattern
  __ mov(rscratch2, rscratch1);
  Label loop;
  __ bind(loop);
  __ ldr(r19, Address(__ post(r4, wordSize)));   // Load frame size
  __ sub(r19, r19, 2*wordSize);                  // We'll push pc and fp by hand
  __ ldr(lr, Address(__ post(r2, wordSize)));    // Load pc
  __ enter();                                    // Save old & set new fp
  __ sub(sp, sp, r19);                           // Prolog
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  __ mov(sender_sp, sp);                         // Pass sender_sp to next frame
  __ sub(r3, r3, 1);                             // Decrement counter
  __ cbnz(r3, loop);

  // Re-push self-frame
  __ ldr(lr, Address(r2));
  __ enter();

  // Allocate a full sized register save area.  We subtract 2 because
  // enter() just pushed 2 words
  __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
  __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  //
  // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)

  // Use rfp because the frames look interpreted now
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, rcpool); // second arg: exec_mode
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blr(rscratch1);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start,
                       new OopMap( frame_size_in_words, 0 ));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Collect return values
  __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
  __ ldr(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
  // I think this is useless (throwing pc?)
  // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  // Pop self-frame.
  __ leave();                           // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  // Push self-frame.  We get here with a return address in LR
  // and sp should be 16 byte aligned
  // push rfp and retaddr by hand
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // we don't expect an arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // compiler left unloaded_class_index in j_rarg0; move to where the
  // runtime expects it.
  if (c_rarg1 != j_rarg0) {
    __ movw(c_rarg1, j_rarg0);
  }

  // we need to set the last Java SP to the stack pointer of the stub frame
  // and the pc to the address where this runtime call will return
  // (although actually any pc in this code blob will do).
  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  //
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1,
         RuntimeAddress(CAST_FROM_FN_PTR(address,
                                         Deoptimization::uncommon_trap)));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);

  // location of rfp is known implicitly by the frame sender code

  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false);

  // move UnrollBlock* into r4
  __ mov(r4, r0);

#ifdef ASSERT
  { Label L;
    __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
    __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
    __ br(Assembler::EQ, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
  __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!

  // Pop deoptimized frame (int)
  __ ldrw(r2, Address(r4,
                      Deoptimization::UnrollBlock::
                      size_of_deoptimized_frame_offset_in_bytes()));
  __ sub(r2, r2, 2 * wordSize);
  __ add(sp, sp, r2);
  __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
  // LR should now be the return address to the caller (3) frame

#ifdef ASSERT
  // Compilers generate code that bang the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  if (UseStackBanging) {
    __ ldrw(r1, Address(r4,
                        Deoptimization::UnrollBlock::
                        total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(r1, r2);
  }
#endif

  // Load address of array of frame pcs into r2 (address*)
  __ ldr(r2, Address(r4,
                     Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Load address of array of frame sizes into r5 (intptr_t*)
  __ ldr(r5, Address(r4,
                     Deoptimization::UnrollBlock::
                     frame_sizes_offset_in_bytes()));

  // Counter
  __ ldrw(r3, Address(r4,
                      Deoptimization::UnrollBlock::
                      number_of_frames_offset_in_bytes())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, sp);
  __ ldrw(r1, Address(r4,
                      Deoptimization::UnrollBlock::
                      caller_adjustment_offset_in_bytes())); // (int)
  __ sub(sp, sp, r1);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ ldr(r1, Address(r5, 0));       // Load frame size
  __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
  __ ldr(lr, Address(r2, 0));       // Save return address
  __ enter();                       // and old rfp & set new rfp
  __ sub(sp, sp, r1);               // Prolog
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(sender_sp, sp);            // Pass sender_sp to next frame
  __ add(r5, r5, wordSize);         // Bump array pointer (sizes)
  __ add(r2, r2, wordSize);         // Bump array pointer (pcs)
  __ subsw(r3, r3, 1);              // Decrement counter
  __ br(Assembler::GT, loop);
  __ ldr(lr, Address(r2, 0));       // save final return address
  // Re-push self-frame
  __ enter();                       // & old rfp & set new rfp

  // Use rfp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  // sp should already be aligned
  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blr(rscratch1);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Pop self-frame.
  __ leave();                 // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and sets up the oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = NULL;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  // Save Integer and Float registers.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    // Additionally, r20 is a callee-saved register so we can look at
    // it later to determine if someone changed the return address for
    // us!
    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ str(r20, Address(rfp, wordSize));
  }

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
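  // (The pc recorded below is the return address bound at retaddr above,
  // so a stack walk through this blob finds the oopmap keyed by that pc.)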

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false);

  __ maybe_isb();
  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  Label no_adjust, bail;
  if (!cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ ldr(rscratch1, Address(rfp, wordSize));
    __ cmp(r20, rscratch1);
    __ br(Assembler::NE, no_adjust);

#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ str(r20, Address(rfp, wordSize));
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blr(rscratch1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
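  //
  // On the no-exception path the code below then effectively does
  // (a sketch):
  //
  //   rmethod   = thread->vm_result_2();   // callee Method* chosen by the VM
  //   rscratch1 = r0;                      // entry point to jump to
  //   restore the saved argument registers;
  //   jump to rscratch1;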

  oop_maps->add_gc_map( __ offset() - start, map);

  __ maybe_isb();

  // r0 contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, RegisterSaver::reg_offset_in_bytes(rmethod)));

  // r0 is where we want to jump; overwrite rscratch1's slot, which is saved and scratch
  __ str(r0, Address(sp, RegisterSaver::rscratch1_offset_in_bytes()));
  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob (frame size is in words)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

#ifdef COMPILER2
// This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped to from a compiled method.
// (see emit_exception_handler in aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   r0: exception oop
//   r3: exception pc
//
// Results:
//   r0: exception oop
//   r3: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
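//
// In outline the blob does (a sketch):
//
//   thread->exception_oop = r0;  thread->exception_pc = r3;
//   handler = OptoRuntime::handle_exception_C(thread);
//   r0 = thread->exception_oop;  r4 = thread->exception_pc;
//   thread->exception_oop = NULL;
//   goto handler;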
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R2_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO check various assumptions made here
  //
  // make sure we do so before running this

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee save registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blr(rscratch1);
  __ maybe_isb();

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-save registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8: exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2