1 /* 2 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * Copyright (c) 2015, Linaro Ltd. All rights reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #include "precompiled.hpp" 28 #include "asm/macroAssembler.hpp" 29 #include "asm/macroAssembler.inline.hpp" 30 #include "code/debugInfoRec.hpp" 31 #include "code/icBuffer.hpp" 32 #include "code/vtableStubs.hpp" 33 #include "interp_masm_aarch32.hpp" 34 #include "interpreter/interpreter.hpp" 35 #include "oops/compiledICHolder.hpp" 36 #include "prims/jvmtiRedefineClassesTrace.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/vframeArray.hpp" 39 #include "vmreg_aarch32.inline.hpp" 40 #ifdef COMPILER1 41 #include "c1/c1_Runtime1.hpp" 42 #endif 43 #ifdef COMPILER2 44 #include "adfiles/ad_aarch32.hpp" 45 #include "opto/runtime.hpp" 46 #endif 47 48 49 #define __ masm-> 50 51 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; 52 53 class SimpleRuntimeFrame { 54 55 public: 56 57 // Most of the runtime stubs have this simple frame layout. 58 // This class exists to make the layout shared in one place. 59 // Offsets are for compiler stack slots, which are jints. 60 enum layout { 61 // The frame sender code expects that rbp will be in the "natural" place and 62 // will override any oopMap setting for it. We must therefore force the layout 63 // so that it agrees with the frame sender code. 64 // we don't expect any arg reg save area so aarch32 asserts that 65 // frame::arg_reg_save_area_bytes == 0 66 rbp_off = 0, 67 rbp_off2, 68 return_off, return_off2, 69 framesize 70 }; 71 }; 72 73 // FIXME -- this is used by C1 74 class RegisterSaver { 75 public: 76 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words); 77 static void restore_live_registers(MacroAssembler* masm); 78 79 // Capture info about frame layout 80 enum layout { 81 fpu_state_off = 0, 82 fpu_state_end = fpu_state_off+FPUStateSizeInWords-1, 83 // The frame sender code expects that rfp will be in 84 // the "natural" place and will override any oopMap 85 // setting for it. We must therefore force the layout 86 // so that it agrees with the frame sender code. 87 // 88 // FIXME there are extra saved register (from `push_CPU_state`) note that r11 == rfp 89 r0_off, 90 r1_off, 91 r2_off, 92 r3_off, 93 r4_off, 94 r5_off, 95 r6_off, 96 r7_off, 97 r8_off, 98 r9_off, rscratch1_off = r9_off, 99 r10_off, rmethod_off = r10_off, 100 r11_off, 101 r12_off, 102 reg_save_pad, // align area to 8-bytes to simplify stack alignment to 8 103 rfp_off, 104 return_off, 105 reg_save_size, 106 }; 107 108 109 // Offsets into the register save area 110 // Used by deoptimization when it is managing result register 111 // values on its own 112 113 static int offset_in_bytes(int offset) { return offset * wordSize; } 114 115 // During deoptimization only the result registers need to be restored, 116 // all the other values have already been extracted. 117 static void restore_result_registers(MacroAssembler* masm); 118 119 }; 120 121 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) { 122 int frame_size_in_bytes = additional_frame_words*wordSize + reg_save_size*BytesPerInt; 123 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; 124 int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt; 125 *total_frame_words = frame_size_in_bytes / wordSize;; 126 127 __ enter(); 128 __ push_CPU_state(); 129 130 // Set an oopmap for the call site. This oopmap will map all 131 // oop-registers and debug-info registers as callee-saved. This 132 // will allow deoptimization at this safepoint to find all possible 133 // debug-info recordings, as well as let GC find all oops. 134 135 OopMapSet *oop_maps = new OopMapSet(); 136 OopMap* oop_map = new OopMap(frame_size_in_slots, 0); 137 138 oop_map->set_callee_saved(VMRegImpl::stack2reg(r0_off + additional_frame_slots), r0->as_VMReg()); 139 oop_map->set_callee_saved(VMRegImpl::stack2reg(r1_off + additional_frame_slots), r1->as_VMReg()); 140 oop_map->set_callee_saved(VMRegImpl::stack2reg(r2_off + additional_frame_slots), r2->as_VMReg()); 141 oop_map->set_callee_saved(VMRegImpl::stack2reg(r3_off + additional_frame_slots), r3->as_VMReg()); 142 oop_map->set_callee_saved(VMRegImpl::stack2reg(r4_off + additional_frame_slots), r4->as_VMReg()); 143 oop_map->set_callee_saved(VMRegImpl::stack2reg(r5_off + additional_frame_slots), r5->as_VMReg()); 144 oop_map->set_callee_saved(VMRegImpl::stack2reg(r6_off + additional_frame_slots), r6->as_VMReg()); 145 oop_map->set_callee_saved(VMRegImpl::stack2reg(r7_off + additional_frame_slots), r7->as_VMReg()); 146 oop_map->set_callee_saved(VMRegImpl::stack2reg(r8_off + additional_frame_slots), r8->as_VMReg()); 147 oop_map->set_callee_saved(VMRegImpl::stack2reg(r10_off + additional_frame_slots), r10->as_VMReg()); 148 // r11 saved in frame header as rfp, not map it here 149 // r11 & r14 have special meaning (can't hold oop), so not map them 150 151 for (int i = 0; i < 31; ++i) { 152 oop_map->set_callee_saved(VMRegImpl::stack2reg(fpu_state_off + i + additional_frame_slots), 153 as_FloatRegister(i)->as_VMReg()); 154 } 155 156 return oop_map; 157 } 158 159 void RegisterSaver::restore_live_registers(MacroAssembler* masm) { 160 __ pop_CPU_state(); 161 __ leave(); 162 } 163 164 void RegisterSaver::restore_result_registers(MacroAssembler* masm) { 165 166 // Just restore result register. Only used by deoptimization. By 167 // now any callee save register that needs to be restored to a c2 168 // caller of the deoptee has been extracted into the vframeArray 169 // and will be stuffed into the c2i adapter we create for later 170 // restoration so only result registers need to be restored here. 171 172 173 174 // Restore fp result register 175 __ vldr_f64(d0, Address(sp, offset_in_bytes(fpu_state_off))); 176 // Restore integer result register 177 __ ldr(r0, Address(sp, offset_in_bytes(r0_off))); 178 __ ldr(r1, Address(sp, offset_in_bytes(r1_off))); 179 180 // Pop all of the register save are off the stack 181 __ add(sp, sp, reg_save_size * wordSize); 182 } 183 184 // Is vector's size (in bytes) bigger than a size saved by default? 185 // 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions. 186 bool SharedRuntime::is_wide_vector(int size) { 187 return size > 16; 188 } 189 190 // This functions returns offset from fp to java arguments on stack. 191 // 192 // The java_calling_convention describes stack locations as ideal slots on 193 // a frame with no abi restrictions. Since we must observe abi restrictions 194 // (like the placement of the register window) the slots must be biased by 195 // the following value. 196 static int reg2offset_in(VMReg r) { 197 // After stack frame created, fp points to 1 slot after previous sp value. 198 return (r->reg2stack() + 1) * VMRegImpl::stack_slot_size; 199 } 200 201 static int reg2offset_out(VMReg r) { 202 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 203 } 204 205 // --------------------------------------------------------------------------- 206 // Read the array of BasicTypes from a signature, and compute where the 207 // arguments should go. Values in the VMRegPair regs array refer to 4-byte 208 // quantities. Values less than VMRegImpl::stack0 are registers, those above 209 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer 210 // as framesizes are fixed. 211 // VMRegImpl::stack0 refers to the first slot 0(sp). 212 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register 213 // up to RegisterImpl::number_of_registers) are the 64-bit 214 // integer registers. 215 216 // Note: the INPUTS in sig_bt are in units of Java argument words, 217 // which are 64-bit. The OUTPUTS are in 32-bit units. 218 219 // The Java calling convention is a "shifted" version of the C ABI. 220 // By skipping the first C ABI register we can call non-static jni 221 // methods with small numbers of arguments without having to shuffle 222 // the arguments at all. Since we control the java ABI we ought to at 223 // least get some advantage out of it. 224 225 int SharedRuntime::java_calling_convention(const BasicType *sig_bt, 226 VMRegPair *regs, 227 int total_args_passed, 228 int is_outgoing) { 229 230 // Create the mapping between argument positions and 231 // registers. 232 static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = { 233 j_rarg0, j_rarg1, j_rarg2, j_rarg3 234 }; 235 const int FP_ArgReg_N = 16; 236 static const FloatRegister FP_ArgReg[] = { 237 f0, f1, f2, f3, 238 f4, f5, f6, f7, 239 f8, f9, f10, f11, 240 f12, f13, f14, f15, 241 }; 242 243 uint int_args = 0; 244 uint fp_args = 0; 245 uint stk_args = 0; 246 247 for (int i = 0; i < total_args_passed; i++) { 248 switch (sig_bt[i]) { 249 case T_BOOLEAN: 250 case T_CHAR: 251 case T_BYTE: 252 case T_SHORT: 253 case T_INT: 254 case T_OBJECT: 255 case T_ARRAY: 256 case T_ADDRESS: 257 if (int_args < Argument::n_int_register_parameters_j) { 258 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); 259 } else { 260 regs[i].set1(VMRegImpl::stack2reg(stk_args)); 261 stk_args += 1; 262 } 263 break; 264 case T_VOID: 265 // halves of T_LONG or T_DOUBLE 266 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); 267 regs[i].set_bad(); 268 break; 269 case T_LONG: 270 assert(sig_bt[i + 1] == T_VOID, "expecting half"); 271 if (int_args + 1 < Argument::n_int_register_parameters_j) { 272 regs[i].set_pair(INT_ArgReg[int_args + 1]->as_VMReg(), INT_ArgReg[int_args]->as_VMReg()); 273 int_args += 2; 274 } else { 275 regs[i].set2(VMRegImpl::stack2reg(stk_args)); 276 stk_args += 2; 277 } 278 break; 279 case T_FLOAT: 280 if (fp_args < FP_ArgReg_N) { 281 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); 282 } else { 283 regs[i].set1(VMRegImpl::stack2reg(stk_args)); 284 stk_args += 1; 285 } 286 break; 287 case T_DOUBLE: 288 assert(sig_bt[i + 1] == T_VOID, "expecting half"); 289 fp_args = round_to(fp_args, 2); 290 if (fp_args < FP_ArgReg_N) { 291 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg()); 292 fp_args += 2; 293 } else { 294 regs[i].set2(VMRegImpl::stack2reg(stk_args)); 295 stk_args += 2; 296 } 297 break; 298 default: 299 ShouldNotReachHere(); 300 break; 301 } 302 } 303 304 return round_to(stk_args, StackAlignmentInBytes/wordSize); 305 } 306 307 // Patch the callers callsite with entry to compiled code if it exists. 308 static void patch_callers_callsite(MacroAssembler *masm) { 309 Label L; 310 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset()))); 311 __ cbz(rscratch1, L); 312 313 __ enter(); 314 __ push_CPU_state(); 315 316 // VM needs caller's callsite 317 // VM needs target method 318 // This needs to be a long call since we will relocate this adapter to 319 // the codeBuffer and it may not reach 320 321 #ifndef PRODUCT 322 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); 323 #endif 324 325 __ mov(c_rarg0, rmethod); 326 __ mov(c_rarg1, lr); 327 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite))); 328 __ bl(rscratch1); 329 __ maybe_isb(); 330 331 __ pop_CPU_state(); 332 // restore sp 333 __ leave(); 334 __ bind(L); 335 } 336 337 static void gen_c2i_adapter(MacroAssembler *masm, 338 int total_args_passed, 339 int comp_args_on_stack, 340 const BasicType *sig_bt, 341 const VMRegPair *regs, 342 Label& skip_fixup) { 343 // Before we get into the guts of the C2I adapter, see if we should be here 344 // at all. We've come from compiled code and are attempting to jump to the 345 // interpreter, which means the caller made a static call to get here 346 // (vcalls always get a compiled target if there is one). Check for a 347 // compiled target. If there is one, we need to patch the caller's call. 348 patch_callers_callsite(masm); 349 350 __ bind(skip_fixup); 351 352 // Since all args are passed on the stack, total_args_passed * 353 // Interpreter::stackElementSize is the space we need. 354 355 const int extraspace = total_args_passed * Interpreter::stackElementSize; 356 const Register compArgPos = lr; 357 int ld_shift = 0; 358 359 __ str(compArgPos, Address(sp, -(extraspace + wordSize))); 360 __ mov(compArgPos, sp); 361 362 // Now write the args into the outgoing interpreter space 363 for (int i = 0; i < total_args_passed; i++) { 364 365 if (sig_bt[i] == T_VOID) { 366 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 367 continue; 368 } 369 370 // next stack slot offset 371 const int next_off = -Interpreter::stackElementSize; 372 373 VMReg r_1 = regs[i].first(); 374 VMReg r_2 = regs[i].second(); 375 if (!r_1->is_valid()) { 376 assert(!r_2->is_valid(), ""); 377 continue; 378 } 379 380 if (r_2->is_valid()) { 381 assert(i + 1 < total_args_passed && sig_bt[i + 1] == T_VOID, "going to overrwrite reg_2 value"); 382 } 383 384 if (r_1->is_stack()) { 385 // memory to memory use rscratch1 386 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size - ld_shift; 387 if (!r_2->is_valid()) { 388 __ ldr(rscratch1, Address(compArgPos, ld_off)); 389 __ str(rscratch1, Address(sp, next_off, Address::pre)); 390 } else { 391 int tmp_off = ld_off; 392 // ldrd accepts only imm8 393 if(abs(ld_off) > (255 << 2)) { 394 if(__ is_valid_for_imm12(ld_off)) { 395 __ add(compArgPos, compArgPos, ld_off); 396 } else { 397 // add operates encoded imm12, NOT plain 398 __ mov(rscratch1, ld_off); 399 __ add(compArgPos, compArgPos, rscratch1); 400 } 401 tmp_off = 0; 402 ld_shift += ld_off; 403 } 404 __ ldrd(rscratch1, rscratch2, Address(compArgPos, tmp_off)); 405 __ strd(rscratch1, rscratch2, Address(sp, 2* next_off, Address::pre)); 406 } 407 } else if (r_1->is_Register()) { 408 Register r = r_1->as_Register(); 409 assert(r != compArgPos, "compArgPos was modified"); 410 if (!r_2->is_valid()) { 411 __ str(r, Address(sp, next_off, Address::pre)); 412 } else { 413 assert(r_2->as_Register() != compArgPos, "compArgPos was modified"); 414 __ strd(r, r_2->as_Register(), Address(sp, 2 * next_off, Address::pre)); 415 } 416 } else { 417 assert(r_1->is_FloatRegister(), ""); 418 if (!r_2->is_valid()) { 419 // Can't do pre or post addressing for vldr, vstr 420 __ add(sp, sp, next_off); 421 __ vstr_f32(r_1->as_FloatRegister(), Address(sp)); 422 } else { 423 // TODO assert(r_2->is_FloatRegister() && r_2->as_FloatRegister() == r_1->as_FloatRegister() + 1, ""); 424 // Can't do pre or post addressing for vldr, vstr 425 __ add(sp, sp, 2 * next_off); 426 __ vstr_f64(r_1->as_FloatRegister(), Address(sp)); 427 } 428 } 429 } 430 431 // hope, sp is returned to desired value 432 __ ldr(compArgPos, Address(sp, -wordSize)); 433 434 // set sender sp 435 if(__ is_valid_for_imm12(extraspace)) { 436 __ add(r4, sp, extraspace); 437 } else { 438 __ mov(rscratch1, extraspace); 439 __ add(r4, sp, rscratch1); 440 } 441 442 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset()))); 443 __ b(rscratch1); 444 } 445 446 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, 447 address code_start, address code_end, 448 Label& L_ok) { 449 Label L_fail; 450 __ lea(temp_reg, ExternalAddress(code_start)); 451 __ cmp(pc_reg, temp_reg); 452 __ b(L_fail, Assembler::LO); 453 __ lea(temp_reg, ExternalAddress(code_end)); 454 __ cmp(pc_reg, temp_reg); 455 __ b(L_ok, Assembler::LO); 456 __ bind(L_fail); 457 } 458 459 static void gen_i2c_adapter(MacroAssembler *masm, 460 int total_args_passed, 461 int comp_args_on_stack, 462 const BasicType *sig_bt, 463 const VMRegPair *regs) { 464 465 // Note: r13 contains the senderSP on entry. We must preserve it since 466 // we may do a i2c -> c2i transition if we lose a race where compiled 467 // code goes non-entrant while we get args ready. 468 469 // In addition we use r13 to locate all the interpreter args because 470 // we must align the stack to 16 bytes. 471 472 // Adapters are frameless. 473 474 // An i2c adapter is frameless because the *caller* frame, which is 475 // interpreted, routinely repairs its own sp (from 476 // interpreter_frame_last_sp), even if a callee has modified the 477 // stack pointer. It also recalculates and aligns sp. 478 479 // A c2i adapter is frameless because the *callee* frame, which is 480 // interpreted, routinely repairs its caller's sp (from sender_sp, 481 // which is set up via the senderSP register). 482 483 // In other words, if *either* the caller or callee is interpreted, we can 484 // get the stack pointer repaired after a call. 485 486 // This is why c2i and i2c adapters cannot be indefinitely composed. 487 // In particular, if a c2i adapter were to somehow call an i2c adapter, 488 // both caller and callee would be compiled methods, and neither would 489 // clean up the stack pointer changes performed by the two adapters. 490 // If this happens, control eventually transfers back to the compiled 491 // caller, but with an uncorrected stack, causing delayed havoc. 492 493 if (VerifyAdapterCalls && 494 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { 495 // So, let's test for cascading c2i/i2c adapters right now. 496 // assert(Interpreter::contains($return_addr) || 497 // StubRoutines::contains($return_addr), 498 // "i2c adapter must return to an interpreter frame"); 499 __ block_comment("verify_i2c { "); 500 Label L_ok; 501 if (Interpreter::code() != NULL) 502 range_check(masm, lr, rscratch1, 503 Interpreter::code()->code_start(), Interpreter::code()->code_end(), 504 L_ok); 505 if (StubRoutines::code1() != NULL) 506 range_check(masm, lr, rscratch1, 507 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), 508 L_ok); 509 if (StubRoutines::code2() != NULL) 510 range_check(masm, lr, rscratch1, 511 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), 512 L_ok); 513 const char* msg = "i2c adapter must return to an interpreter frame"; 514 __ block_comment(msg); 515 __ stop(msg); 516 __ bind(L_ok); 517 __ block_comment("} verify_i2ce "); 518 } 519 520 const int stack_space = round_to(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes); 521 const int ld_high = total_args_passed *Interpreter::stackElementSize; 522 // Point to interpreter value (vs. tag) 523 const int next_off = -Interpreter::stackElementSize; // offset from ld ptr 524 const Register loadCounter = lr; 525 526 // Align sp to StackAlignmentInBytes so compiled frame starts always aligned 527 // This is required by APCS, so all native code depends on it. The compiled 528 // Java code is not required to follow this standard however doing so 529 // simplifies the code because allows to have fixed size for compiled frames 530 __ mov(rscratch2, sp); 531 __ align_stack(); 532 if(total_args_passed) { 533 // put below reserved stack space, imm12 should be enough 534 __ str(loadCounter, Address(sp, -(stack_space + wordSize))); 535 536 if(__ is_valid_for_imm12(ld_high)) { 537 __ add(loadCounter, rscratch2, ld_high); 538 } else { 539 // add operates encoded imm12, we need plain 540 __ mov(rscratch1, ld_high); 541 __ add(loadCounter, rscratch2, rscratch1); 542 } 543 } 544 545 if(comp_args_on_stack) { 546 if(__ is_valid_for_imm12(stack_space)) { 547 __ sub(sp, sp, stack_space); 548 } else { 549 // add operates encoded imm12, we need plain 550 __ mov(rscratch1, stack_space); 551 __ sub(sp, sp, rscratch1); 552 } 553 } 554 555 // +------+ -> r4 556 // | 0 | \ 557 // | 1 | \ 558 // | 2 | - > Load in argument order going down. 559 // | x | / 560 // | N | / 561 // +------+ -> inital sp 562 // | pad | maybe 1 word to align the stack to 8 bytes 563 // | M | \ 564 // | x | \ 565 // | 2 | -> Load in argument order going up. 566 // | 1 | / 567 // | 0 | / 568 // +------+ -> 569 570 571 int sp_offset = 0; 572 573 // Now generate the shuffle code. 574 for (int i = 0; i < total_args_passed; i++) { 575 576 if (sig_bt[i] == T_VOID) { 577 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 578 continue; 579 } 580 581 // Pick up 0, 1 or 2 words from SP+offset. 582 583 // 584 // 585 // 586 VMReg r_1 = regs[i].first(); 587 VMReg r_2 = regs[i].second(); 588 if (!r_1->is_valid()) { 589 assert(!r_2->is_valid(), ""); 590 continue; 591 } 592 593 if (r_2->is_valid()) { 594 assert(i + 1 < total_args_passed && sig_bt[i + 1] == T_VOID, "going to overrwrite reg_2 value"); 595 } 596 597 if (r_1->is_stack()) { 598 // Convert stack slot to an SP offset 599 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size - sp_offset; 600 601 if (!r_2->is_valid()) { 602 __ ldr(rscratch2, Address(loadCounter, next_off, Address::pre)); 603 __ str(rscratch2, Address(sp, st_off)); 604 } else { 605 int tmp_off = st_off; 606 if(abs(st_off) > (255 << 2)) { 607 //st_off doesn't fit imm8 required by strd 608 609 if(__ is_valid_for_imm12(st_off)) { 610 __ add(sp, sp, st_off); 611 } else { 612 // add operates encoded imm12, NOT plain 613 __ mov(rscratch1, st_off); 614 __ add(sp, sp, rscratch1); 615 } 616 tmp_off = 0; 617 sp_offset += st_off; 618 } 619 620 621 // Interpreter local[n] == MSW, local[n+1] == LSW however locals 622 // are accessed as negative so LSW is at LOW address 623 624 // this can be a misaligned move 625 __ ldrd(rscratch1, rscratch2, Address(loadCounter, 2 * next_off, Address::pre)); 626 __ strd(rscratch1, rscratch2, Address(sp, tmp_off)); 627 } 628 } else if (r_1->is_Register()) { // Register argument 629 Register r = r_1->as_Register(); 630 assert(r != loadCounter, "loadCounter is reloaded"); 631 if (r_2->is_valid()) { 632 assert(r_2->as_Register() != loadCounter, "loadCounter is reloaded"); 633 // this can be a misaligned move 634 // ldrd can handle inconsecutive registers 635 __ ldrd(r, r_2->as_Register(), Address(loadCounter, 2 * next_off, Address::pre)); 636 } else { 637 __ ldr(r, Address(loadCounter, next_off, Address::pre)); 638 } 639 } else { 640 assert(r_1->is_FloatRegister(), ""); 641 if (!r_2->is_valid()) { 642 // Can't do pre or post addressing for vldr, vstr 643 __ add(loadCounter, loadCounter, next_off); 644 __ vldr_f32(r_1->as_FloatRegister(), Address(loadCounter)); 645 } else { 646 // TODO assert(r_2->is_FloatRegister() && r_2->as_FloatRegister() == r_1->as_FloatRegister() + 1, ""); 647 // Can't do pre or post addressing for vldr, vstr 648 __ add(loadCounter, loadCounter, 2 * next_off); 649 __ vldr_f64(r_1->as_FloatRegister(), Address(loadCounter)); 650 } 651 } 652 } 653 654 // restore sp 655 if(sp_offset) { 656 if(__ is_valid_for_imm12(sp_offset)) { 657 __ sub(sp, sp, sp_offset); 658 } else { 659 // add operates encoded imm12, we need plain 660 __ mov(rscratch1, sp_offset); 661 __ sub(sp, sp, rscratch1); 662 } 663 } 664 665 if(total_args_passed) { 666 // restore loadCounter 667 __ ldr(loadCounter, Address(sp, -wordSize)); 668 } 669 670 // 6243940 We might end up in handle_wrong_method if 671 // the callee is deoptimized as we race thru here. If that 672 // happens we don't want to take a safepoint because the 673 // caller frame will look interpreted and arguments are now 674 // "compiled" so it is much better to make this transition 675 // invisible to the stack walking code. Unfortunately if 676 // we try and find the callee by normal means a safepoint 677 // is possible. So we stash the desired callee in the thread 678 // and the vm will find there should this case occur. 679 680 __ str(rmethod, Address(rthread, JavaThread::callee_target_offset())); 681 682 // Will jump to the compiled code just as if compiled code was doing it. 683 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset()))); 684 __ b(rscratch1); 685 } 686 687 // --------------------------------------------------------------- 688 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, 689 int total_args_passed, 690 int comp_args_on_stack, 691 const BasicType *sig_bt, 692 const VMRegPair *regs, 693 AdapterFingerPrint* fingerprint) { 694 address i2c_entry = __ pc(); 695 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs); 696 697 address c2i_unverified_entry = __ pc(); 698 Label skip_fixup; 699 700 Label ok; 701 702 Register holder = rscratch2; 703 Register receiver = j_rarg0; 704 Register tmp = r10; // A call-clobbered register not used for arg passing 705 706 // ------------------------------------------------------------------------- 707 // Generate a C2I adapter. On entry we know rmethod holds the Method* during calls 708 // to the interpreter. The args start out packed in the compiled layout. They 709 // need to be unpacked into the interpreter layout. This will almost always 710 // require some stack space. We grow the current (compiled) stack, then repack 711 // the args. We finally end in a jump to the generic interpreter entry point. 712 // On exit from the interpreter, the interpreter will restore our SP (lest the 713 // compiled code, which relys solely on SP and not FP, get sick). 714 715 { 716 __ block_comment("c2i_unverified_entry {"); 717 __ load_klass(rscratch1, receiver); 718 __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset())); 719 __ cmp(rscratch1, tmp); 720 __ ldr(rmethod, Address(holder, CompiledICHolder::holder_method_offset())); 721 __ b(ok, Assembler::EQ); 722 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 723 724 __ bind(ok); 725 // Method might have been compiled since the call site was patched to 726 // interpreted; if that is the case treat it as a miss so we can get 727 // the call site corrected. 728 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset()))); 729 __ cbz(rscratch1, skip_fixup); 730 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 731 __ block_comment("} c2i_unverified_entry"); 732 } 733 734 address c2i_entry = __ pc(); 735 736 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); 737 738 __ flush(); 739 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); 740 } 741 742 int SharedRuntime::c_calling_convention(const BasicType *sig_bt, 743 VMRegPair *regs, 744 VMRegPair *regs2, 745 int total_args_passed) { 746 assert(regs2 == NULL, "not needed on AArch32"); 747 748 // We return the amount of VMRegImpl stack slots we need to reserve for all 749 // the arguments NOT counting out_preserve_stack_slots. 750 751 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = { 752 c_rarg0, c_rarg1, c_rarg2, c_rarg3 753 }; 754 const int FP_ArgReg_N = 16; 755 static const FloatRegister FP_ArgReg[] = { 756 f0, f1, f2, f3, 757 f4, f5, f6, f7, 758 f8, f9, f10, f11, 759 f12, f13, f14, f15, 760 }; 761 unsigned long fp_free_mask = (1 << FP_ArgReg_N) - 1; 762 763 uint int_args = 0; 764 uint fp_args = 0; 765 uint stk_args = 0; 766 767 for (int i = 0; i < total_args_passed; i++) { 768 switch (sig_bt[i]) { 769 case T_BOOLEAN: 770 case T_CHAR: 771 case T_BYTE: 772 case T_SHORT: 773 case T_INT: 774 case T_OBJECT: 775 case T_ARRAY: 776 case T_ADDRESS: 777 case T_METADATA: 778 if (int_args < Argument::n_int_register_parameters_c) { 779 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); 780 } else { 781 regs[i].set1(VMRegImpl::stack2reg(stk_args)); 782 stk_args += 1; 783 } 784 break; 785 case T_LONG: 786 assert(sig_bt[i + 1] == T_VOID, "expecting half"); 787 if (int_args + 1 < Argument::n_int_register_parameters_c) { 788 if ((int_args % 2) != 0) { 789 ++int_args; 790 } 791 regs[i].set2(INT_ArgReg[int_args]->as_VMReg()); 792 int_args += 2; 793 } else { 794 if (stk_args % 2 != 0) { 795 ++stk_args; 796 } 797 regs[i].set2(VMRegImpl::stack2reg(stk_args)); 798 stk_args += 2; 799 int_args = Argument::n_int_register_parameters_c; 800 } 801 break; 802 case T_FLOAT: 803 if (fp_free_mask & ((1 << FP_ArgReg_N)-1)) { 804 unsigned index = __builtin_ctz(fp_free_mask); 805 regs[i].set1(FP_ArgReg[index]->as_VMReg()); 806 fp_free_mask &= ~(1 << index); 807 fp_args += 2 * ((~index) & 1); 808 } else { 809 regs[i].set1(VMRegImpl::stack2reg(stk_args)); 810 stk_args += 1; 811 } 812 break; 813 case T_DOUBLE: 814 assert(sig_bt[i + 1] == T_VOID, "expecting half"); 815 if (fp_args + 1 < FP_ArgReg_N) { 816 fp_free_mask &= ~(3 << fp_args); 817 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg()); 818 fp_args += 2; 819 } else { 820 regs[i].set2(VMRegImpl::stack2reg(stk_args)); 821 stk_args += 2; 822 } 823 break; 824 case T_VOID: // Halves of longs and doubles 825 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); 826 regs[i].set_bad(); 827 break; 828 default: 829 ShouldNotReachHere(); 830 break; 831 } 832 } 833 834 return round_to(stk_args, StackAlignmentInBytes/wordSize); 835 } 836 837 // On 64 bit we will store integer like items to the stack as 838 // 64 bits items (sparc abi) even though java would only store 839 // 32bits for a parameter. On 32bit it will simply be 32 bits 840 // So this routine will do 32->32 on 32bit and 32->64 on 64bit 841 842 static void move_int(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 843 if (src.first()->is_stack()) { 844 if (dst.first()->is_stack()) { 845 // stack to stack 846 __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 847 __ str(rscratch1, Address(sp, reg2offset_out(dst.first()))); 848 } else { 849 // stack to reg 850 __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first()))); 851 } 852 } else if (dst.first()->is_stack()) { 853 // reg to stack 854 __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first()))); 855 } else { 856 if (dst.first() != src.first()) { 857 __ mov(dst.first()->as_Register(), src.first()->as_Register()); 858 } 859 } 860 } 861 862 // An oop arg. Must pass a handle not the oop itself 863 static void object_move(MacroAssembler* masm, 864 OopMap* map, 865 int oop_handle_offset, 866 int framesize_in_slots, 867 VMRegPair src, 868 VMRegPair dst, 869 bool is_receiver, 870 int* receiver_offset) { 871 872 // must pass a handle. First figure out the location we use as a handle 873 874 Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register(); 875 876 // See if oop is NULL if it is we need no handle 877 878 if (src.first()->is_stack()) { 879 880 // Oop is already on the stack as an argument 881 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 882 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 883 if (is_receiver) { 884 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 885 } 886 887 __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 888 __ lea(rHandle, Address(rfp, reg2offset_in(src.first()))); 889 // conditionally move a NULL 890 __ cmp(rscratch1, 0); 891 __ mov(rHandle, 0, Assembler::EQ); 892 } else { 893 894 // Oop is in an a register we must store it to the space we reserve 895 // on the stack for oop_handles and pass a handle if oop is non-NULL 896 897 const Register rOop = src.first()->as_Register(); 898 int oop_slot; 899 if (rOop == j_rarg0) 900 oop_slot = 0; 901 else if (rOop == j_rarg1) 902 oop_slot = 1; 903 else if (rOop == j_rarg2) 904 oop_slot = 2; 905 else { 906 assert(rOop == j_rarg3, "wrong register"); 907 oop_slot = 3; 908 } 909 910 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 911 int offset = oop_slot*VMRegImpl::stack_slot_size; 912 913 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 914 // Store oop in handle area, may be NULL 915 __ str(rOop, Address(sp, offset)); 916 if (is_receiver) { 917 *receiver_offset = offset; 918 } 919 920 __ cmp(rOop, 0); 921 __ lea(rHandle, Address(sp, offset)); 922 // conditionally move a NULL 923 __ mov(rHandle, 0, Assembler::EQ); 924 } 925 926 // If arg is on the stack then place it otherwise it is already in correct reg. 927 if (dst.first()->is_stack()) { 928 __ str(rHandle, Address(sp, reg2offset_out(dst.first()))); 929 } 930 } 931 932 // A float arg may have to do float reg int reg conversion 933 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 934 if (src.first()->is_stack()) { 935 if (dst.first()->is_stack()) { 936 // stack to stack 937 // Have no vfp scratch registers, so copy via gpr 938 __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); 939 __ str(rscratch1, Address(sp, reg2offset_out(dst.first()))); 940 } else { 941 // stack to reg 942 __ vldr_f32(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 943 } 944 } else if (dst.first()->is_stack()) { 945 // reg to stack 946 __ vstr_f32(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 947 } else { 948 if (dst.first() != src.first()) { 949 __ vmov_f32(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 950 } 951 } 952 } 953 954 // A long move 955 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 956 if (src.first()->is_stack()) { 957 if (dst.first()->is_stack()) { 958 // stack to stack 959 __ ldrd(rscratch1, rscratch2, Address(rfp, reg2offset_in(src.first()))); 960 __ strd(rscratch1, rscratch2, Address(sp, reg2offset_out(dst.first()))); 961 } else { 962 // stack to reg 963 __ ldrd(dst.first()->as_Register(), dst.second()->as_Register(), 964 Address(rfp, reg2offset_in(src.first()))); 965 } 966 } else if (dst.first()->is_stack()) { 967 // reg to stack 968 __ strd(src.first()->as_Register(), src.second()->as_Register(), 969 Address(sp, reg2offset_out(dst.first()))); 970 } else { 971 // reg to reg 972 if (dst.first() != src.first()) { 973 if (dst.first() != src.second()) { 974 __ mov(dst.first()->as_Register(), src.first()->as_Register()); 975 __ mov(dst.second()->as_Register(), src.second()->as_Register()); 976 } else { 977 __ mov(dst.second()->as_Register(), src.second()->as_Register()); 978 __ mov(dst.first()->as_Register(), src.first()->as_Register()); 979 } 980 } 981 } 982 } 983 984 // A double move 985 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 986 if (src.first()->is_stack()) { 987 if (dst.first()->is_stack()) { 988 // stack to stack 989 // Have no vfp scratch registers, so copy via gpr 990 __ ldrd(rscratch1, rscratch2, Address(rfp, reg2offset_in(src.first()))); 991 __ strd(rscratch1, rscratch2, Address(sp, reg2offset_out(dst.first()))); 992 } else { 993 // stack to reg 994 __ vldr_f64(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first()))); 995 } 996 } else if (dst.first()->is_stack()) { 997 // reg to stack 998 __ vstr_f64(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first()))); 999 } else { 1000 if (dst.first() != src.first()) { 1001 __ vmov_f64(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); 1002 } 1003 } 1004 } 1005 1006 1007 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1008 // We always ignore the frame_slots arg and just use the space just below frame pointer 1009 // which by this time is free to use 1010 switch (ret_type) { 1011 case T_FLOAT: 1012 __ vstr_f32(f0, Address(rfp, -2 * wordSize)); 1013 break; 1014 case T_DOUBLE: 1015 __ vstr_f64(d0, Address(rfp, -3 * wordSize)); 1016 break; 1017 case T_LONG: 1018 __ strd(r0, r1, Address(rfp, -3 * wordSize)); 1019 break; 1020 case T_VOID: 1021 break; 1022 default: 1023 __ str(r0, Address(rfp, -2 * wordSize)); 1024 break; 1025 } 1026 } 1027 1028 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1029 // We always ignore the frame_slots arg and just use the space just below frame pointer 1030 // which by this time is free to use 1031 switch (ret_type) { 1032 case T_FLOAT: 1033 __ vldr_f32(d0, Address(rfp, -2 * wordSize)); 1034 break; 1035 case T_DOUBLE: 1036 __ vldr_f64(d0, Address(rfp, -3 * wordSize)); 1037 break; 1038 case T_LONG: 1039 __ ldrd(r0, r1, Address(rfp, -3 * wordSize)); 1040 break; 1041 case T_VOID: 1042 break; 1043 default: 1044 __ ldr(r0, Address(rfp, -2 * wordSize)); 1045 break; 1046 } 1047 } 1048 1049 static int save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { 1050 RegSet x; 1051 int saved_slots = 0; 1052 for ( int i = first_arg ; i < arg_count ; i++ ) { 1053 if (args[i].first()->is_Register()) { 1054 x = x + args[i].first()->as_Register(); 1055 ++saved_slots; 1056 } else if (args[i].first()->is_FloatRegister()) { 1057 FloatRegister fr = args[i].first()->as_FloatRegister(); 1058 1059 if (args[i].second()->is_FloatRegister()) { 1060 assert(args[i].is_single_phys_reg(), "doubles should be 2 consequents float regs"); 1061 __ decrement(sp, 2 * wordSize); 1062 __ vstr_f64(fr, Address(sp)); 1063 saved_slots += 2; 1064 } else { 1065 __ decrement(sp, wordSize); 1066 __ vstr_f32(fr, Address(sp)); 1067 ++saved_slots; 1068 } 1069 } 1070 } 1071 __ push(x, sp); 1072 return saved_slots; 1073 } 1074 1075 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { 1076 RegSet x; 1077 for ( int i = first_arg ; i < arg_count ; i++ ) { 1078 if (args[i].first()->is_Register()) { 1079 x = x + args[i].first()->as_Register(); 1080 } else { 1081 ; 1082 } 1083 } 1084 __ pop(x, sp); 1085 for ( int i = first_arg ; i < arg_count ; i++ ) { 1086 if (args[i].first()->is_Register()) { 1087 ; 1088 } else if (args[i].first()->is_FloatRegister()) { 1089 FloatRegister fr = args[i].first()->as_FloatRegister(); 1090 1091 if (args[i].second()->is_FloatRegister()) { 1092 assert(args[i].is_single_phys_reg(), "doubles should be 2 consequents float regs"); 1093 __ vstr_f64(fr, Address(sp)); 1094 __ increment(sp, 2 * wordSize); 1095 } else { 1096 __ vstr_f32(fr, Address(sp)); 1097 __ increment(sp, wordSize); 1098 } 1099 } 1100 } 1101 } 1102 1103 1104 // Check GC_locker::needs_gc and enter the runtime if it's true. This 1105 // keeps a new JNI critical region from starting until a GC has been 1106 // forced. Save down any oops in registers and describe them in an 1107 // OopMap. 1108 static void check_needs_gc_for_critical_native(MacroAssembler* masm, 1109 int stack_slots, 1110 int total_c_args, 1111 int total_in_args, 1112 int arg_save_area, 1113 OopMapSet* oop_maps, 1114 VMRegPair* in_regs, 1115 BasicType* in_sig_bt) { Unimplemented(); } 1116 1117 // Unpack an array argument into a pointer to the body and the length 1118 // if the array is non-null, otherwise pass 0 for both. 1119 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); } 1120 1121 1122 class ComputeMoveOrder: public StackObj { 1123 class MoveOperation: public ResourceObj { 1124 friend class ComputeMoveOrder; 1125 private: 1126 VMRegPair _src; 1127 VMRegPair _dst; 1128 int _src_index; 1129 int _dst_index; 1130 bool _processed; 1131 MoveOperation* _next; 1132 MoveOperation* _prev; 1133 1134 static int get_id(VMRegPair r) { Unimplemented(); return 0; } 1135 1136 public: 1137 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst): 1138 _src(src) 1139 , _src_index(src_index) 1140 , _dst(dst) 1141 , _dst_index(dst_index) 1142 , _next(NULL) 1143 , _prev(NULL) 1144 , _processed(false) { Unimplemented(); } 1145 1146 VMRegPair src() const { Unimplemented(); return _src; } 1147 int src_id() const { Unimplemented(); return 0; } 1148 int src_index() const { Unimplemented(); return 0; } 1149 VMRegPair dst() const { Unimplemented(); return _src; } 1150 void set_dst(int i, VMRegPair dst) { Unimplemented(); } 1151 int dst_index() const { Unimplemented(); return 0; } 1152 int dst_id() const { Unimplemented(); return 0; } 1153 MoveOperation* next() const { Unimplemented(); return 0; } 1154 MoveOperation* prev() const { Unimplemented(); return 0; } 1155 void set_processed() { Unimplemented(); } 1156 bool is_processed() const { Unimplemented(); return 0; } 1157 1158 // insert 1159 void break_cycle(VMRegPair temp_register) { Unimplemented(); } 1160 1161 void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); } 1162 }; 1163 1164 private: 1165 GrowableArray<MoveOperation*> edges; 1166 1167 public: 1168 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs, 1169 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); } 1170 1171 // Collected all the move operations 1172 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); } 1173 1174 // Walk the edges breaking cycles between moves. The result list 1175 // can be walked in order to produce the proper set of loads 1176 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; } 1177 }; 1178 1179 1180 static void rt_call(MacroAssembler* masm, address dest) { 1181 CodeBlob *cb = CodeCache::find_blob(dest); 1182 if (cb) { 1183 __ far_call(RuntimeAddress(dest), NULL, rscratch2); 1184 } else { 1185 __ lea(rscratch2, RuntimeAddress(dest)); 1186 __ bl(rscratch2); 1187 __ maybe_isb(); 1188 } 1189 } 1190 1191 static void verify_oop_args(MacroAssembler* masm, 1192 methodHandle method, 1193 const BasicType* sig_bt, 1194 const VMRegPair* regs) { 1195 Register temp_reg = rscratch2; // not part of any compiled calling seq 1196 if (VerifyOops) { 1197 for (int i = 0; i < method->size_of_parameters(); i++) { 1198 if (sig_bt[i] == T_OBJECT || 1199 sig_bt[i] == T_ARRAY) { 1200 VMReg r = regs[i].first(); 1201 assert(r->is_valid(), "bad oop arg"); 1202 if (r->is_stack()) { 1203 __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size)); 1204 __ verify_oop(temp_reg); 1205 } else { 1206 __ verify_oop(r->as_Register()); 1207 } 1208 } 1209 } 1210 } 1211 } 1212 1213 static void gen_special_dispatch(MacroAssembler* masm, 1214 methodHandle method, 1215 const BasicType* sig_bt, 1216 const VMRegPair* regs) { 1217 verify_oop_args(masm, method, sig_bt, regs); 1218 vmIntrinsics::ID iid = method->intrinsic_id(); 1219 1220 // Now write the args into the outgoing interpreter space 1221 bool has_receiver = false; 1222 Register receiver_reg = noreg; 1223 int member_arg_pos = -1; 1224 Register member_reg = noreg; 1225 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1226 if (ref_kind != 0) { 1227 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1228 member_reg = r4; 1229 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1230 } else if (iid == vmIntrinsics::_invokeBasic) { 1231 has_receiver = true; 1232 } else { 1233 fatal(err_msg_res("unexpected intrinsic id %d", iid)); 1234 } 1235 1236 if (member_reg != noreg) { 1237 // Load the member_arg into register, if necessary. 1238 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1239 VMReg r = regs[member_arg_pos].first(); 1240 if (r->is_stack()) { 1241 __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size)); 1242 } else { 1243 // no data motion is needed 1244 member_reg = r->as_Register(); 1245 } 1246 } 1247 1248 if (has_receiver) { 1249 // Make sure the receiver is loaded into a register. 1250 assert(method->size_of_parameters() > 0, "oob"); 1251 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); 1252 VMReg r = regs[0].first(); 1253 assert(r->is_valid(), "bad receiver arg"); 1254 if (r->is_stack()) { 1255 // Porting note: This assumes that compiled calling conventions always 1256 // pass the receiver oop in a register. If this is not true on some 1257 // platform, pick a temp and load the receiver from stack. 1258 fatal("receiver always in a register"); 1259 } else { 1260 // no data motion is needed 1261 receiver_reg = r->as_Register(); 1262 } 1263 } 1264 1265 // Figure out which address we are really jumping to: 1266 MethodHandles::generate_method_handle_dispatch(masm, iid, 1267 receiver_reg, member_reg, /*for_compiler_entry:*/ true); 1268 } 1269 1270 // --------------------------------------------------------------------------- 1271 // Generate a native wrapper for a given method. The method takes arguments 1272 // in the Java compiled code convention, marshals them to the native 1273 // convention (handlizes oops, etc), transitions to native, makes the call, 1274 // returns to java state (possibly blocking), unhandlizes any result and 1275 // returns. 1276 // 1277 // Critical native functions are a shorthand for the use of 1278 // GetPrimtiveArrayCritical and disallow the use of any other JNI 1279 // functions. The wrapper is expected to unpack the arguments before 1280 // passing them to the callee and perform checks before and after the 1281 // native call to ensure that they GC_locker 1282 // lock_critical/unlock_critical semantics are followed. Some other 1283 // parts of JNI setup are skipped like the tear down of the JNI handle 1284 // block and the check for pending exceptions it's impossible for them 1285 // to be thrown. 1286 // 1287 // They are roughly structured like this: 1288 // if (GC_locker::needs_gc()) 1289 // SharedRuntime::block_for_jni_critical(); 1290 // tranistion to thread_in_native 1291 // unpack arrray arguments and call native entry point 1292 // check for safepoint in progress 1293 // check if any thread suspend flags are set 1294 // call into JVM and possible unlock the JNI critical 1295 // if a GC was suppressed while in the critical native. 1296 // transition back to thread_in_Java 1297 // return to caller 1298 // 1299 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, 1300 methodHandle method, 1301 int compile_id, 1302 BasicType* in_sig_bt, 1303 VMRegPair* in_regs, 1304 BasicType ret_type) { 1305 if (method->is_method_handle_intrinsic()) { 1306 vmIntrinsics::ID iid = method->intrinsic_id(); 1307 intptr_t start = (intptr_t)__ pc(); 1308 int vep_offset = ((intptr_t)__ pc()) - start; 1309 1310 // First instruction must be a nop as it may need to be patched on deoptimisation 1311 __ nop(); 1312 gen_special_dispatch(masm, 1313 method, 1314 in_sig_bt, 1315 in_regs); 1316 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 1317 __ flush(); 1318 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 1319 return nmethod::new_native_nmethod(method, 1320 compile_id, 1321 masm->code(), 1322 vep_offset, 1323 frame_complete, 1324 stack_slots / VMRegImpl::slots_per_word, 1325 in_ByteSize(-1), 1326 in_ByteSize(-1), 1327 (OopMapSet*)NULL); 1328 } 1329 1330 bool is_critical_native = true; 1331 address native_func = method->critical_native_function(); 1332 if (native_func == NULL) { 1333 native_func = method->native_function(); 1334 is_critical_native = false; 1335 } 1336 assert(native_func != NULL, "must have function"); 1337 1338 // An OopMap for lock (and class if static) 1339 OopMapSet *oop_maps = new OopMapSet(); 1340 intptr_t start = (intptr_t)__ pc(); 1341 1342 // We have received a description of where all the java arg are located 1343 // on entry to the wrapper. We need to convert these args to where 1344 // the jni function will expect them. To figure out where they go 1345 // we convert the java signature to a C signature by inserting 1346 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1347 1348 const int total_in_args = method->size_of_parameters(); 1349 int total_c_args = total_in_args; 1350 if (!is_critical_native) { 1351 total_c_args += 1; 1352 if (method->is_static()) { 1353 total_c_args++; 1354 } 1355 } else { 1356 for (int i = 0; i < total_in_args; i++) { 1357 if (in_sig_bt[i] == T_ARRAY) { 1358 total_c_args++; 1359 } 1360 } 1361 } 1362 1363 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1364 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1365 BasicType* in_elem_bt = NULL; 1366 1367 int argc = 0; 1368 if (!is_critical_native) { 1369 out_sig_bt[argc++] = T_ADDRESS; 1370 if (method->is_static()) { 1371 out_sig_bt[argc++] = T_OBJECT; 1372 } 1373 1374 for (int i = 0; i < total_in_args ; i++ ) { 1375 out_sig_bt[argc++] = in_sig_bt[i]; 1376 } 1377 } else { 1378 Thread* THREAD = Thread::current(); 1379 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args); 1380 SignatureStream ss(method->signature()); 1381 for (int i = 0; i < total_in_args ; i++ ) { 1382 if (in_sig_bt[i] == T_ARRAY) { 1383 // Arrays are passed as int, elem* pair 1384 out_sig_bt[argc++] = T_INT; 1385 out_sig_bt[argc++] = T_ADDRESS; 1386 Symbol* atype = ss.as_symbol(CHECK_NULL); 1387 const char* at = atype->as_C_string(); 1388 if (strlen(at) == 2) { 1389 assert(at[0] == '[', "must be"); 1390 switch (at[1]) { 1391 case 'B': in_elem_bt[i] = T_BYTE; break; 1392 case 'C': in_elem_bt[i] = T_CHAR; break; 1393 case 'D': in_elem_bt[i] = T_DOUBLE; break; 1394 case 'F': in_elem_bt[i] = T_FLOAT; break; 1395 case 'I': in_elem_bt[i] = T_INT; break; 1396 case 'J': in_elem_bt[i] = T_LONG; break; 1397 case 'S': in_elem_bt[i] = T_SHORT; break; 1398 case 'Z': in_elem_bt[i] = T_BOOLEAN; break; 1399 default: ShouldNotReachHere(); 1400 } 1401 } 1402 } else { 1403 out_sig_bt[argc++] = in_sig_bt[i]; 1404 in_elem_bt[i] = T_VOID; 1405 } 1406 if (in_sig_bt[i] != T_VOID) { 1407 assert(in_sig_bt[i] == ss.type(), "must match"); 1408 ss.next(); 1409 } 1410 } 1411 } 1412 1413 // Now figure out where the args must be stored and how much stack space 1414 // they require. 1415 int out_arg_slots; 1416 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); 1417 1418 // Compute framesize for the wrapper. We need to handlize all oops in 1419 // incoming registers 1420 1421 // Calculate the total number of stack slots we will need. 1422 1423 // First count the abi requirement plus all of the outgoing args 1424 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1425 1426 // Now the space for the inbound oop handle area 1427 int total_save_slots = -1; 1428 if (is_critical_native) { 1429 // Critical natives may have to call out so they need a save area 1430 // for register arguments. 1431 int double_slots = 0; 1432 int single_slots = 0; 1433 for ( int i = 0; i < total_in_args; i++) { 1434 if (in_regs[i].first()->is_Register()) { 1435 const Register reg = in_regs[i].first()->as_Register(); 1436 switch (in_sig_bt[i]) { 1437 case T_ARRAY: // critical array (uses 2 slots on LP64) 1438 case T_BOOLEAN: 1439 case T_BYTE: 1440 case T_SHORT: 1441 case T_CHAR: 1442 case T_INT: single_slots++; break; 1443 case T_LONG: double_slots++; break; 1444 default: ShouldNotReachHere(); 1445 } 1446 } else if (in_regs[i].first()->is_FloatRegister()) { 1447 ShouldNotReachHere(); 1448 } 1449 } 1450 total_save_slots = double_slots * 2 + single_slots; 1451 // align the save area 1452 if (double_slots != 0) { 1453 stack_slots = round_to(stack_slots, 2); 1454 } 1455 } else { 1456 total_save_slots = 4 * VMRegImpl::slots_per_word; // 4 arguments passed in registers 1457 } 1458 assert(total_save_slots != -1, "initialize total_save_slots!"); 1459 1460 int oop_handle_offset = stack_slots; 1461 stack_slots += total_save_slots; 1462 1463 // Now any space we need for handlizing a klass if static method 1464 1465 int klass_slot_offset = 0; 1466 int klass_offset = -1; 1467 int lock_slot_offset = 0; 1468 bool is_static = false; 1469 1470 if (method->is_static()) { 1471 klass_slot_offset = stack_slots; 1472 stack_slots += VMRegImpl::slots_per_word; 1473 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1474 is_static = true; 1475 } 1476 1477 // Plus a lock if needed 1478 1479 if (method->is_synchronized()) { 1480 lock_slot_offset = stack_slots; 1481 stack_slots += VMRegImpl::slots_per_word; 1482 } 1483 1484 // Now a place (+2) to save return values or temp during shuffling 1485 // + 2 for return address (which we own) and saved rfp 1486 stack_slots += 4; 1487 1488 // Ok The space we have allocated will look like: 1489 // 1490 // 1491 // FP-> | saved lr | 1492 // |---------------------| 1493 // | saved fp | 1494 // |---------------------| 1495 // | 2 slots for moves | 1496 // |---------------------| 1497 // | lock box (if sync) | 1498 // |---------------------| <- lock_slot_offset 1499 // | klass (if static) | 1500 // |---------------------| <- klass_slot_offset 1501 // | oopHandle area | 1502 // |---------------------| <- oop_handle_offset (8 java arg registers) 1503 // | outbound memory | 1504 // | based arguments | 1505 // | | 1506 // |---------------------| 1507 // | | 1508 // SP-> | out_preserved_slots | 1509 // 1510 // 1511 1512 1513 // Now compute actual number of stack words we need rounding to make 1514 // stack properly aligned. 1515 stack_slots = round_to(stack_slots, StackAlignmentInSlots); 1516 1517 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1518 1519 // First thing make an ic check to see if we should even be here 1520 1521 // We are free to use all registers as temps without saving them and 1522 // restoring them except rfp. rfp is the only callee save register 1523 // as far as the interpreter and the compiler(s) are concerned. 1524 1525 1526 const Register ic_reg = rscratch2; 1527 const Register receiver = j_rarg0; 1528 1529 Label hit; 1530 Label exception_pending; 1531 1532 assert_different_registers(ic_reg, receiver, rscratch1); 1533 __ verify_oop(receiver); 1534 __ cmp_klass(receiver, ic_reg, rscratch1); 1535 __ b(hit, Assembler::EQ); 1536 1537 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 1538 1539 // Verified entry point must be aligned 1540 __ align(8); 1541 1542 __ bind(hit); 1543 1544 #ifdef ASSERT 1545 __ mov(ic_reg, 0xdead); // trash ic_reg(rscratch2), as used as real scratch further 1546 #endif 1547 1548 int vep_offset = ((intptr_t)__ pc()) - start; 1549 1550 // Generate stack overflow check 1551 1552 // If we have to make this method not-entrant we'll overwrite its 1553 // first instruction with a jump. For this action to be legal we 1554 // must ensure that this first instruction is a B, BL, NOP, BKPT, 1555 // SVC, HVC, or SMC. Make it a NOP. 1556 __ nop(); 1557 1558 if (UseStackBanging) { 1559 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); 1560 } else { 1561 Unimplemented(); 1562 } 1563 1564 // Generate a new frame for the wrapper. 1565 __ enter(); 1566 // -2 because return address is already present and so is saved rfp 1567 __ sub(sp, sp, stack_size - 2*wordSize); 1568 1569 // Frame is now completed as far as size and linkage. 1570 int frame_complete = ((intptr_t)__ pc()) - start; 1571 1572 if (is_critical_native) { 1573 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args, 1574 oop_handle_offset, oop_maps, in_regs, in_sig_bt); 1575 } 1576 1577 // 1578 // We immediately shuffle the arguments so that any vm call we have to 1579 // make from here on out (sync slow path, jvmti, etc.) we will have 1580 // captured the oops from our caller and have a valid oopMap for 1581 // them. 1582 1583 // ----------------- 1584 // The Grand Shuffle 1585 1586 // The Java calling convention is either equal (linux) or denser (win64) than the 1587 // c calling convention. However the because of the jni_env argument the c calling 1588 // convention always has at least one more (and two for static) arguments than Java. 1589 // Therefore if we move the args from java -> c backwards then we will never have 1590 // a register->register conflict and we don't have to build a dependency graph 1591 // and figure out how to break any cycles. 1592 // 1593 1594 // Record sp-based slot for receiver on stack for non-static methods 1595 int receiver_offset = -1; 1596 1597 // This is a trick. We double the stack slots so we can claim 1598 // the oops in the caller's frame. Since we are sure to have 1599 // more args than the caller doubling is enough to make 1600 // sure we can capture all the incoming oop args from the 1601 // caller. 1602 // 1603 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1604 1605 // Mark location of rfp (someday) 1606 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp)); 1607 1608 1609 #ifdef ASSERT 1610 bool reg_destroyed[RegisterImpl::number_of_registers]; 1611 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 1612 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 1613 reg_destroyed[r] = false; 1614 } 1615 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 1616 freg_destroyed[f] = false; 1617 } 1618 1619 #endif // ASSERT 1620 1621 // This may iterate in two different directions depending on the 1622 // kind of native it is. The reason is that for regular JNI natives 1623 // the incoming and outgoing registers are offset upwards and for 1624 // critical natives they are offset down. 1625 GrowableArray<int> arg_order(2 * total_in_args); 1626 VMRegPair tmp_vmreg; 1627 tmp_vmreg.set1(rscratch2->as_VMReg()); 1628 1629 if (!is_critical_native) { 1630 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { 1631 arg_order.push(i); 1632 arg_order.push(c_arg); 1633 } 1634 } else { 1635 // Compute a valid move order, using tmp_vmreg to break any cycles 1636 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg); 1637 } 1638 1639 int temploc = -1; 1640 for (int ai = 0; ai < arg_order.length(); ai += 2) { 1641 int i = arg_order.at(ai); 1642 int c_arg = arg_order.at(ai + 1); 1643 __ block_comment(err_msg("move %d -> %d", i, c_arg)); 1644 if (c_arg == -1) { 1645 assert(is_critical_native, "should only be required for critical natives"); 1646 // This arg needs to be moved to a temporary 1647 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register()); 1648 in_regs[i] = tmp_vmreg; 1649 temploc = i; 1650 continue; 1651 } else if (i == -1) { 1652 assert(is_critical_native, "should only be required for critical natives"); 1653 // Read from the temporary location 1654 assert(temploc != -1, "must be valid"); 1655 i = temploc; 1656 temploc = -1; 1657 } 1658 #ifdef ASSERT 1659 if (in_regs[i].first()->is_Register()) { 1660 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); 1661 } else if (in_regs[i].first()->is_FloatRegister()) { 1662 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!"); 1663 } 1664 if (out_regs[c_arg].first()->is_Register()) { 1665 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 1666 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 1667 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true; 1668 } 1669 #endif // ASSERT 1670 switch (in_sig_bt[i]) { 1671 case T_ARRAY: 1672 if (is_critical_native) { 1673 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); 1674 c_arg++; 1675 #ifdef ASSERT 1676 if (out_regs[c_arg].first()->is_Register()) { 1677 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 1678 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 1679 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true; 1680 } 1681 #endif 1682 break; 1683 } 1684 case T_OBJECT: 1685 assert(!is_critical_native, "no oop arguments"); 1686 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 1687 ((i == 0) && (!is_static)), 1688 &receiver_offset); 1689 break; 1690 case T_VOID: 1691 break; 1692 1693 case T_FLOAT: 1694 float_move(masm, in_regs[i], out_regs[c_arg]); 1695 break; 1696 1697 case T_DOUBLE: 1698 assert( i + 1 < total_in_args && 1699 in_sig_bt[i + 1] == T_VOID && 1700 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 1701 double_move(masm, in_regs[i], out_regs[c_arg]); 1702 break; 1703 1704 case T_LONG : 1705 long_move(masm, in_regs[i], out_regs[c_arg]); 1706 break; 1707 1708 case T_BOOLEAN : 1709 case T_BYTE : 1710 case T_CHAR : 1711 case T_SHORT : 1712 case T_INT : 1713 move_int(masm, in_regs[i], out_regs[c_arg]); 1714 break; 1715 1716 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 1717 case T_NARROWOOP : 1718 case T_METADATA : 1719 case T_NARROWKLASS : 1720 default: 1721 ShouldNotReachHere(); 1722 } 1723 } 1724 1725 // point c_arg at the first arg that is already loaded in case we 1726 // need to spill before we call out 1727 int c_arg = total_c_args - total_in_args; 1728 1729 // We use r4 as the oop handle for the receiver/klass 1730 // It is callee save so it survives the call to native 1731 1732 const Register oop_handle_reg = r4; 1733 1734 // Pre-load a static method's oop. Used both by locking code and 1735 // the normal JNI call code. 1736 if (method->is_static() && !is_critical_native) { 1737 1738 // load oop into a register 1739 __ movoop(oop_handle_reg, 1740 JNIHandles::make_local(method->method_holder()->java_mirror()), 1741 /*immediate*/true); 1742 1743 // Now handlize the static class mirror it's known not-null. 1744 __ str(oop_handle_reg, Address(sp, klass_offset)); 1745 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 1746 1747 // Now get the handle 1748 __ lea(oop_handle_reg, Address(sp, klass_offset)); 1749 // store the klass handle as second argument 1750 __ mov(c_rarg1, oop_handle_reg); 1751 // and protect the arg if we must spill 1752 c_arg--; 1753 } 1754 1755 // Change state to native (we save the return address in the thread, since it might not 1756 // be pushed on the stack when we do a a stack traversal). It is enough that the pc() 1757 // points into the right code segment. It does not have to be the correct return pc. 1758 // We use the same pc/oopMap repeatedly when we call out 1759 1760 intptr_t the_pc = (intptr_t) __ pc(); 1761 oop_maps->add_gc_map(the_pc - start, map); 1762 1763 __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1); 1764 1765 1766 // We have all of the arguments setup at this point. We must not touch any register 1767 // argument registers at this point (what if we save/restore them there are no oop? 1768 1769 #ifdef DTRACE_ENABLED 1770 { 1771 SkipIfEqual skip(masm, &DTraceMethodProbes, false); 1772 // protect the args we've loaded 1773 (void) save_args(masm, total_c_args, c_arg, out_regs); 1774 __ mov_metadata(c_rarg1, method()); 1775 __ call_VM_leaf( 1776 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 1777 rthread, c_rarg1); 1778 restore_args(masm, total_c_args, c_arg, out_regs); 1779 } 1780 #endif 1781 1782 // RedefineClasses() tracing support for obsolete method entry 1783 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { 1784 // protect the args we've loaded 1785 (void) save_args(masm, total_c_args, c_arg, out_regs); 1786 __ mov_metadata(c_rarg1, method()); 1787 __ call_VM_leaf( 1788 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 1789 rthread, c_rarg1); 1790 restore_args(masm, total_c_args, c_arg, out_regs); 1791 } 1792 1793 // Lock a synchronized method 1794 1795 // Register definitions used by locking and unlocking 1796 1797 Label slow_path_lock; 1798 Label lock_done; 1799 1800 if (method->is_synchronized()) { 1801 assert(!is_critical_native, "unhandled"); 1802 1803 // registers below are not used to pass parameters 1804 // and they are caller save in C1 1805 // => safe to use as temporary here 1806 #ifdef COMPILER2 1807 stop("fix temporary register set below"); 1808 #endif 1809 const Register swap_reg = r5; 1810 const Register obj_reg = r6; // Will contain the oop 1811 const Register lock_reg = r7; // Address of compiler lock object (BasicLock) 1812 1813 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); 1814 1815 // Get the handle (the 2nd argument) 1816 __ mov(oop_handle_reg, c_rarg1); 1817 1818 // Get address of the box 1819 1820 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1821 1822 // Load the oop from the handle 1823 __ ldr(obj_reg, Address(oop_handle_reg, 0)); 1824 1825 if (UseBiasedLocking) { 1826 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch2, false, lock_done, &slow_path_lock); 1827 } 1828 1829 // Load (object->mark() | 1) into swap_reg %r0 1830 __ ldr(swap_reg, Address(obj_reg, 0)); 1831 __ orr(swap_reg, swap_reg, 1); 1832 1833 // Save (object->mark() | 1) into BasicLock's displaced header 1834 __ str(swap_reg, Address(lock_reg, mark_word_offset)); 1835 1836 // src -> dest iff dest == r0 else r0 <- dest 1837 { Label here; 1838 __ cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, lock_done, &slow_path_lock); 1839 } 1840 1841 // Slow path will re-enter here 1842 __ bind(lock_done); 1843 } 1844 1845 1846 // Finally just about ready to make the JNI call 1847 1848 1849 // get JNIEnv* which is first argument to native 1850 if (!is_critical_native) { 1851 __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset()))); 1852 } 1853 1854 // Now set thread in native 1855 __ mov(rscratch1, _thread_in_native); 1856 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset())); 1857 __ dmb(Assembler::ISH); 1858 __ str(rscratch1, rscratch2); 1859 1860 // Do the call 1861 rt_call(masm, native_func); 1862 1863 // Unpack native results. 1864 switch (ret_type) { 1865 case T_BOOLEAN: __ uxtb(r0, r0); break; 1866 case T_CHAR : __ uxth(r0, r0); break; 1867 case T_BYTE : __ sxtb(r0, r0); break; 1868 case T_SHORT : __ sxth(r0, r0); break; 1869 case T_INT : break; 1870 case T_DOUBLE : 1871 case T_FLOAT : 1872 // Result is in d0 we'll save as needed 1873 break; 1874 case T_ARRAY: // Really a handle 1875 case T_OBJECT: // Really a handle 1876 break; // can't de-handlize until after safepoint check 1877 case T_VOID: break; 1878 case T_LONG: break; 1879 default : ShouldNotReachHere(); 1880 } 1881 1882 // Switch thread to "native transition" state before reading the synchronization state. 1883 // This additional state is necessary because reading and testing the synchronization 1884 // state is not atomic w.r.t. GC, as this scenario demonstrates: 1885 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 1886 // VM thread changes sync state to synchronizing and suspends threads for GC. 1887 // Thread A is resumed to finish this native method, but doesn't block here since it 1888 // didn't see any synchronization is progress, and escapes. 1889 __ mov(rscratch1, _thread_in_native_trans); 1890 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset())); 1891 __ dmb(Assembler::ISH); 1892 __ str(rscratch1, rscratch2); 1893 1894 if(os::is_MP()) { 1895 if (UseMembar) { 1896 // Force this write out before the read below 1897 __ membar(Assembler::AnyAny); 1898 } else { 1899 // Write serialization page so VM thread can do a pseudo remote membar. 1900 // We use the current thread pointer to calculate a thread specific 1901 // offset to write to within the page. This minimizes bus traffic 1902 // due to cache line collision. 1903 __ serialize_memory(rthread, rscratch1); 1904 } 1905 } 1906 1907 Label after_transition; 1908 1909 // check for safepoint operation in progress and/or pending suspend requests 1910 { 1911 Label Continue; 1912 1913 __ mov(rscratch1, ExternalAddress((address)SafepointSynchronize::address_of_state())); 1914 __ ldr(rscratch1, Address(rscratch1)); 1915 __ cmp(rscratch1, SafepointSynchronize::_not_synchronized); 1916 1917 Label L; 1918 __ b(L, Assembler::NE); 1919 __ ldr(rscratch1, Address(rthread, JavaThread::suspend_flags_offset())); 1920 __ cbz(rscratch1, Continue); 1921 __ bind(L); 1922 1923 // Don't use call_VM as it will see a possible pending exception and forward it 1924 // and never return here preventing us from clearing _last_native_pc down below. 1925 // 1926 save_native_result(masm, ret_type, stack_slots); 1927 __ mov(c_rarg0, rthread); 1928 #ifndef PRODUCT 1929 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); 1930 #endif 1931 if (!is_critical_native) { 1932 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 1933 } else { 1934 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition))); 1935 } 1936 __ bl(rscratch1); 1937 __ maybe_isb(); 1938 // Restore any method result value 1939 restore_native_result(masm, ret_type, stack_slots); 1940 1941 if (is_critical_native) { 1942 // The call above performed the transition to thread_in_Java so 1943 // skip the transition logic below. 1944 __ b(after_transition); 1945 } 1946 1947 __ bind(Continue); 1948 } 1949 1950 // change thread state 1951 __ mov(rscratch1, _thread_in_Java); 1952 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset())); 1953 __ dmb(Assembler::ISH); 1954 __ str(rscratch1, rscratch2); 1955 __ bind(after_transition); 1956 1957 Label reguard; 1958 Label reguard_done; 1959 __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset())); 1960 __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled); 1961 __ b(reguard, Assembler::EQ); 1962 __ bind(reguard_done); 1963 1964 // native result if any is live 1965 1966 // Unlock 1967 Label unlock_done; 1968 Label slow_path_unlock; 1969 if (method->is_synchronized()) { 1970 const Register obj_reg = r2; // Will contain the oop 1971 const Register lock_reg = rscratch1; // Address of compiler lock object (BasicLock) 1972 const Register old_hdr = r3; // value of old header at unlock time 1973 1974 // Get locked oop from the handle we passed to jni 1975 __ ldr(obj_reg, Address(oop_handle_reg, 0)); 1976 1977 if (UseBiasedLocking) { 1978 __ biased_locking_exit(obj_reg, old_hdr, unlock_done); 1979 } 1980 1981 // Simple recursive lock? 1982 // get address of the stack lock 1983 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1984 1985 // get old displaced header 1986 __ ldr(old_hdr, Address(lock_reg, 0)); 1987 __ cbz(old_hdr, unlock_done); 1988 1989 // Atomic swap old header if oop still contains the stack lock 1990 Label succeed; 1991 __ cmpxchgptr(lock_reg, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock); 1992 __ bind(succeed); 1993 1994 // slow path re-enters here 1995 __ bind(unlock_done); 1996 } 1997 1998 #ifdef DTRACE_ENABLED 1999 { 2000 SkipIfEqual skip(masm, &DTraceMethodProbes, false); 2001 save_native_result(masm, ret_type, stack_slots); 2002 __ mov_metadata(c_rarg1, method()); 2003 __ call_VM_leaf( 2004 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2005 rthread, c_rarg1); 2006 restore_native_result(masm, ret_type, stack_slots); 2007 } 2008 #endif 2009 2010 __ reset_last_Java_frame(false, true); 2011 2012 // Unpack oop result 2013 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2014 Label L; 2015 __ cbz(r0, L); 2016 __ ldr(r0, Address(r0, 0)); 2017 __ bind(L); 2018 __ verify_oop(r0); 2019 } 2020 2021 if (!is_critical_native) { 2022 // reset handle block 2023 __ mov(rscratch1, 0); 2024 __ ldr(r2, Address(rthread, JavaThread::active_handles_offset())); 2025 __ str(rscratch1, Address(r2, JNIHandleBlock::top_offset_in_bytes())); 2026 } 2027 2028 __ leave(); 2029 2030 if (!is_critical_native) { 2031 // Any exception pending? 2032 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2033 __ cbnz(rscratch1, exception_pending); 2034 } 2035 2036 // We're done 2037 __ b(lr); 2038 2039 // Unexpected paths are out of line and go here 2040 2041 if (!is_critical_native) { 2042 // forward the exception 2043 __ bind(exception_pending); 2044 2045 // and forward the exception 2046 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2047 } 2048 2049 // Slow path locking & unlocking 2050 if (method->is_synchronized()) { 2051 2052 // BEGIN Slow path lock 2053 __ bind(slow_path_lock); 2054 2055 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM 2056 // args are (oop obj, BasicLock* lock, JavaThread* thread) 2057 2058 // protect the args we've loaded 2059 const int extra_words = save_args(masm, total_c_args, c_arg, out_regs); 2060 2061 __ ldr(c_rarg0, Address(oop_handle_reg)); 2062 __ lea(c_rarg1, Address(sp, (extra_words + lock_slot_offset) * VMRegImpl::stack_slot_size)); 2063 __ mov(c_rarg2, rthread); 2064 2065 // Not a leaf but we have last_Java_frame setup as we want 2066 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3); 2067 restore_args(masm, total_c_args, c_arg, out_regs); 2068 2069 #ifdef ASSERT 2070 { Label L; 2071 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2072 __ cbz(rscratch1, L); 2073 __ stop("no pending exception allowed on exit from monitorenter"); 2074 __ bind(L); 2075 } 2076 #endif 2077 __ b(lock_done); 2078 2079 // END Slow path lock 2080 2081 // BEGIN Slow path unlock 2082 __ bind(slow_path_unlock); 2083 2084 // If we haven't already saved the native result we must save it now as xmm registers 2085 // are still exposed. 2086 2087 save_native_result(masm, ret_type, stack_slots); 2088 2089 __ ldr(c_rarg0, Address(oop_handle_reg)); 2090 __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 2091 2092 // Save pending exception around call to VM (which contains an EXCEPTION_MARK) 2093 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2094 __ mov(rscratch2, 0); 2095 __ str(rscratch2, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2096 2097 rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)); 2098 2099 #ifdef ASSERT 2100 { 2101 Label L; 2102 __ ldr(rscratch2, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2103 __ cbz(rscratch2, L); 2104 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); 2105 __ bind(L); 2106 } 2107 #endif // ASSERT 2108 2109 __ str(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset()))); 2110 2111 restore_native_result(masm, ret_type, stack_slots); 2112 2113 __ b(unlock_done); 2114 2115 // END Slow path unlock 2116 2117 } // synchronized 2118 2119 // SLOW PATH Reguard the stack if needed 2120 2121 __ bind(reguard); 2122 save_native_result(masm, ret_type, stack_slots); 2123 rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2124 restore_native_result(masm, ret_type, stack_slots); 2125 // and continue 2126 __ b(reguard_done); 2127 2128 2129 2130 __ flush(); 2131 2132 nmethod *nm = nmethod::new_native_nmethod(method, 2133 compile_id, 2134 masm->code(), 2135 vep_offset, 2136 frame_complete, 2137 stack_slots / VMRegImpl::slots_per_word, 2138 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2139 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), 2140 oop_maps); 2141 2142 if (is_critical_native) { 2143 nm->set_lazy_critical_native(true); 2144 } 2145 2146 return nm; 2147 } 2148 2149 // this function returns the adjust size (in number of words) to a c2i adapter 2150 // activation for use during deoptimization 2151 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2152 assert(callee_locals >= callee_parameters, 2153 "test and remove; got more parms than locals"); 2154 if (callee_locals < callee_parameters) 2155 return 0; // No adjustment for negative locals 2156 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords; 2157 // diff is counted in stack words 2158 return round_to(diff, 2); 2159 } 2160 2161 2162 //------------------------------generate_deopt_blob---------------------------- 2163 void SharedRuntime::generate_deopt_blob() { 2164 2165 // Allocate space for the code 2166 ResourceMark rm; 2167 // Setup code generation tools 2168 CodeBuffer buffer("deopt_blob", 2048, 1024); 2169 MacroAssembler* masm = new MacroAssembler(&buffer); 2170 int frame_size_in_words; 2171 OopMap* map = NULL; 2172 OopMapSet *oop_maps = new OopMapSet(); 2173 2174 // ------------- 2175 // This code enters when returning to a de-optimized nmethod. A return 2176 // address has been pushed on the the stack, and return values are in 2177 // registers. 2178 // If we are doing a normal deopt then we were called from the patched 2179 // nmethod from the point we returned to the nmethod. So the return 2180 // address on the stack is wrong by NativeCall::instruction_size 2181 // We will adjust the value so it looks like we have the original return 2182 // address on the stack (like when we eagerly deoptimized). 2183 // In the case of an exception pending when deoptimizing, we enter 2184 // with a return address on the stack that points after the call we patched 2185 // into the exception handler. We have the following register state from, 2186 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp). 2187 // r0: exception oop 2188 // r7: exception handler 2189 // r3: throwing pc 2190 // So in this case we simply jam r3 into the useless return address and 2191 // the stack looks just like we want. 2192 // 2193 // At this point we need to de-opt. We save the argument return 2194 // registers. We call the first C routine, fetch_unroll_info(). This 2195 // routine captures the return values and returns a structure which 2196 // describes the current frame size and the sizes of all replacement frames. 2197 // The current frame is compiled code and may contain many inlined 2198 // functions, each with their own JVM state. We pop the current frame, then 2199 // push all the new frames. Then we call the C routine unpack_frames() to 2200 // populate these frames. Finally unpack_frames() returns us the new target 2201 // address. Notice that callee-save registers are BLOWN here; they have 2202 // already been captured in the vframeArray at the time the return PC was 2203 // patched. 2204 address start = __ pc(); 2205 Label cont; 2206 2207 // Prolog for non exception case! 2208 2209 // Save everything in sight. 2210 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2211 2212 // Normal deoptimization. Save exec mode for unpack_frames. 2213 __ mov(r7, Deoptimization::Unpack_deopt); // callee-saved 2214 __ b(cont); 2215 2216 int reexecute_offset = __ pc() - start; 2217 2218 // Reexecute case 2219 // return address is the pc describes what bci to do re-execute at 2220 2221 // No need to update map as each call to save_live_registers will produce identical oopmap 2222 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2223 2224 __ mov(r7, Deoptimization::Unpack_reexecute); // callee-saved 2225 __ b(cont); 2226 2227 int exception_offset = __ pc() - start; 2228 2229 // Prolog for exception case 2230 2231 // all registers are dead at this entry point, except for r0, and 2232 // r3 which contain the exception oop and exception pc 2233 // respectively. Set them in TLS and fall thru to the 2234 // unpack_with_exception_in_tls entry point. 2235 2236 __ str(r3, Address(rthread, JavaThread::exception_pc_offset())); 2237 __ str(r0, Address(rthread, JavaThread::exception_oop_offset())); 2238 2239 int exception_in_tls_offset = __ pc() - start; 2240 2241 // new implementation because exception oop is now passed in JavaThread 2242 2243 // Prolog for exception case 2244 // All registers must be preserved because they might be used by LinearScan 2245 // Exceptiop oop and throwing PC are passed in JavaThread 2246 // tos: stack at point of call to method that threw the exception (i.e. only 2247 // args are on the stack, no return address) 2248 2249 // The return address pushed by save_live_registers will be patched 2250 // later with the throwing pc. The correct value is not available 2251 // now because loading it from memory would destroy registers. 2252 2253 // NB: The SP at this point must be the SP of the method that is 2254 // being deoptimized. Deoptimization assumes that the frame created 2255 // here by save_live_registers is immediately below the method's SP. 2256 // This is a somewhat fragile mechanism. 2257 2258 // Save everything in sight. 2259 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2260 2261 // Now it is safe to overwrite any register 2262 2263 // Deopt during an exception. Save exec mode for unpack_frames. 2264 __ mov(r7, Deoptimization::Unpack_exception); // callee-saved 2265 2266 // load throwing pc from JavaThread and patch it as the return address 2267 // of the current frame. Then clear the field in JavaThread 2268 2269 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset())); 2270 __ str(r3, Address(rfp)); 2271 __ mov(rscratch1, 0); 2272 __ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset())); 2273 2274 #ifdef ASSERT 2275 // verify that there is really an exception oop in JavaThread 2276 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 2277 __ verify_oop(r0); 2278 2279 // verify that there is no pending exception 2280 Label no_pending_exception; 2281 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 2282 __ cbz(rscratch1, no_pending_exception); 2283 __ stop("must not have pending exception here"); 2284 __ bind(no_pending_exception); 2285 #endif 2286 2287 __ bind(cont); 2288 2289 // Call C code. Need thread and this frame, but NOT official VM entry 2290 // crud. We cannot block on this call, no GC can happen. 2291 // 2292 // UnrollBlock* fetch_unroll_info(JavaThread* thread) 2293 2294 // fetch_unroll_info needs to call last_java_frame(). 2295 2296 Label retaddr; 2297 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1); 2298 #ifdef ASSERT0 2299 { Label L; 2300 __ ldr(rscratch1, Address(rthread, 2301 JavaThread::last_Java_fp_offset())); 2302 __ cbz(rscratch1, L); 2303 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared"); 2304 __ bind(L); 2305 } 2306 #endif // ASSERT 2307 __ mov(c_rarg0, rthread); 2308 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info))); 2309 __ bl(rscratch1); 2310 __ bind(retaddr); 2311 2312 // Need to have an oopmap that tells fetch_unroll_info where to 2313 // find any register it might need. 2314 oop_maps->add_gc_map(__ pc() - start, map); 2315 2316 __ reset_last_Java_frame(false, true); 2317 2318 // Load UnrollBlock* into rdi 2319 __ mov(r5, r0); 2320 2321 Label noException; 2322 __ cmp(r7, Deoptimization::Unpack_exception); // Was exception pending? 2323 __ b(noException, Assembler::NE); 2324 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 2325 // QQQ this is useless it was NULL above 2326 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset())); 2327 __ mov(rscratch1, 0); 2328 __ str(rscratch1, Address(rthread, JavaThread::exception_oop_offset())); 2329 __ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset())); 2330 2331 __ verify_oop(r0); 2332 2333 // Overwrite the result registers with the exception results. 2334 __ str(r0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::r0_off))); 2335 // I think this is useless 2336 // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes())); 2337 2338 __ bind(noException); 2339 2340 // Only register save data is on the stack. 2341 // Now restore the result registers. Everything else is either dead 2342 // or captured in the vframeArray. 2343 RegisterSaver::restore_result_registers(masm); 2344 2345 // All of the register save area has been popped of the stack. Only the 2346 // return address remains. 2347 2348 // Pop all the frames we must move/replace. 2349 // 2350 // Frame picture (youngest to oldest) 2351 // 1: self-frame (no frame link) 2352 // 2: deopting frame (no frame link) 2353 // 3: caller of deopting frame (could be compiled/interpreted). 2354 // 2355 // Note: by leaving the return address of self-frame on the stack 2356 // and using the size of frame 2 to adjust the stack 2357 // when we are done the return to frame 3 will still be on the stack. 2358 2359 // Pop deoptimized frame 2360 __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); 2361 __ sub(r2, r2, 2 * wordSize); 2362 __ add(sp, sp, r2); 2363 __ ldrd(rfp, lr, __ post(sp, 2 * wordSize)); 2364 // LR should now be the return address to the caller (3) 2365 2366 #ifdef ASSERT 2367 // Compilers generate code that bang the stack by as much as the 2368 // interpreter would need. So this stack banging should never 2369 // trigger a fault. Verify that it does not on non product builds. 2370 if (UseStackBanging) { 2371 __ ldr(rscratch2, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); 2372 __ bang_stack_size(rscratch2, r2); 2373 } 2374 #endif 2375 // Load address of array of frame pcs into r2 2376 __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); 2377 2378 // Trash the old pc 2379 // __ addptr(sp, wordSize); FIXME ???? 2380 2381 // Load address of array of frame sizes into r4 2382 __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); 2383 2384 // Load counter into r3 2385 __ ldr(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); 2386 2387 // Now adjust the caller's stack to make up for the extra locals 2388 // but record the original sp so that we can save it in the skeletal interpreter 2389 // frame and the stack walking of interpreter_sender will get the unextended sp 2390 // value and not the "real" sp value. 2391 2392 const Register sender_sp = r6; 2393 2394 __ mov(sender_sp, sp); 2395 __ ldr(rscratch1, Address(r5, 2396 Deoptimization::UnrollBlock:: 2397 caller_adjustment_offset_in_bytes())); 2398 __ sub(sp, sp, rscratch1); 2399 2400 // Push interpreter frames in a loop 2401 __ mov(rscratch1, (address)0xDEADDEAD); // Make a recognizable pattern 2402 // Initially used to place 0xDEADDEAD in rscratch2 as well - why? 2403 __ mov(rscratch2, 0); 2404 Label loop; 2405 __ bind(loop); 2406 __ ldr(rscratch1, Address(__ post(r4, wordSize))); // Load frame size 2407 __ sub(rscratch1, rscratch1, 2*wordSize); // We'll push pc and fp by hand 2408 __ ldr(lr, Address(__ post(r2, wordSize))); // Load pc 2409 __ enter(); // Save old & set new fp 2410 __ sub(sp, sp, rscratch1); // Prolog 2411 // This value is corrected by layout_activation_impl 2412 __ str(rscratch2, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 2413 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable 2414 __ mov(sender_sp, sp); // Pass sender_sp to next frame 2415 __ sub(r3, r3, 1); // Decrement counter 2416 __ cbnz(r3, loop); 2417 2418 // Re-push self-frame 2419 __ ldr(lr, Address(r2)); 2420 __ enter(); 2421 2422 // Allocate a full sized register save area. We subtract 2 because 2423 // enter() just pushed 2 words 2424 __ sub(sp, sp, (frame_size_in_words - 2) * wordSize); 2425 2426 // Restore frame locals after moving the frame 2427 __ vstr_f64(d0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::fpu_state_off))); 2428 __ strd(r0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::r0_off))); 2429 2430 // Call C code. Need thread but NOT official VM entry 2431 // crud. We cannot block on this call, no GC can happen. Call should 2432 // restore return values to their stack-slots with the new SP. 2433 // 2434 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode) 2435 2436 // Use rfp because the frames look interpreted now 2437 // Don't need the precise return PC here, just precise enough to point into this code blob. 2438 address the_pc = __ pc(); 2439 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); 2440 2441 __ mov(c_rarg0, rthread); 2442 __ mov(c_rarg1, r7); // second arg: exec_mode 2443 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); 2444 __ bl(rscratch1); 2445 2446 // Set an oopmap for the call site 2447 // Use the same PC we used for the last java frame 2448 oop_maps->add_gc_map(the_pc - start, 2449 new OopMap( frame_size_in_words, 0 )); 2450 2451 // Clear fp AND pc 2452 __ reset_last_Java_frame(true, true); 2453 2454 // Collect return values 2455 __ vldr_f64(d0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::fpu_state_off))); 2456 __ ldrd(r0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::r0_off))); 2457 // I think this is useless (throwing pc?) 2458 // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes())); 2459 2460 // Pop self-frame. 2461 __ leave(); // Epilog 2462 2463 // Jump to interpreter 2464 __ b(lr); 2465 2466 // Make sure all code is generated 2467 masm->flush(); 2468 2469 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); 2470 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2471 2472 } 2473 2474 uint SharedRuntime::out_preserve_stack_slots() { 2475 return 0; 2476 } 2477 2478 #ifdef COMPILER2 2479 //------------------------------generate_uncommon_trap_blob-------------------- 2480 /*void SharedRuntime::generate_uncommon_trap_blob() { 2481 // Allocate space for the code 2482 ResourceMark rm; 2483 // Setup code generation tools 2484 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024); 2485 MacroAssembler* masm = new MacroAssembler(&buffer); 2486 2487 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); 2488 2489 address start = __ pc(); 2490 2491 // Push self-frame. We get here with a return address in LR 2492 // and sp should be 16 byte aligned 2493 // push rfp and retaddr by hand 2494 __ strd(rfp, lr, Address(__ pre(sp, -2 * wordSize))); 2495 // we don't expect an arg reg save area 2496 #ifndef PRODUCT 2497 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); 2498 #endif 2499 // compiler left unloaded_class_index in j_rarg0 move to where the 2500 // runtime expects it. 2501 if (c_rarg1 != j_rarg0) { 2502 __ mov(c_rarg1, j_rarg0); 2503 } 2504 2505 // we need to set the past SP to the stack pointer of the stub frame 2506 // and the pc to the address where this runtime call will return 2507 // although actually any pc in this code blob will do). 2508 Label retaddr; 2509 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1); 2510 2511 // Call C code. Need thread but NOT official VM entry 2512 // crud. We cannot block on this call, no GC can happen. Call should 2513 // capture callee-saved registers as well as return values. 2514 // Thread is in rdi already. 2515 // 2516 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index); 2517 // 2518 // n.b. 2 gp args, 0 fp args, integral return type 2519 2520 __ mov(c_rarg0, rthread); 2521 __ lea(rscratch1, 2522 RuntimeAddress(CAST_FROM_FN_PTR(address, 2523 Deoptimization::uncommon_trap))); 2524 __ bl(rscratch1); 2525 __ bind(retaddr); 2526 2527 // Set an oopmap for the call site 2528 OopMapSet* oop_maps = new OopMapSet(); 2529 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0); 2530 2531 // location of rfp is known implicitly by the frame sender code 2532 2533 oop_maps->add_gc_map(__ pc() - start, map); 2534 2535 __ reset_last_Java_frame(false, true); 2536 2537 // move UnrollBlock* into r4 2538 __ mov(r4, r0); 2539 2540 // Pop all the frames we must move/replace. 2541 // 2542 // Frame picture (youngest to oldest) 2543 // 1: self-frame (no frame link) 2544 // 2: deopting frame (no frame link) 2545 // 3: caller of deopting frame (could be compiled/interpreted). 2546 2547 // Pop self-frame. We have no frame, and must rely only on r0 and sp. 2548 __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog! 2549 2550 // Pop deoptimized frame (int) 2551 __ ldr(r2, Address(r4, 2552 Deoptimization::UnrollBlock:: 2553 size_of_deoptimized_frame_offset_in_bytes())); 2554 __ sub(r2, r2, 2 * wordSize); 2555 __ add(sp, sp, r2); 2556 __ ldrd(rfp, lr, __ post(sp, 2 * wordSize)); 2557 // LR should now be the return address to the caller (3) frame 2558 2559 #ifdef ASSERT 2560 // Compilers generate code that bang the stack by as much as the 2561 // interpreter would need. So this stack banging should never 2562 // trigger a fault. Verify that it does not on non product builds. 2563 if (UseStackBanging) { 2564 __ ldr(r1, Address(r4, 2565 Deoptimization::UnrollBlock:: 2566 total_frame_sizes_offset_in_bytes())); 2567 __ bang_stack_size(r1, r2); 2568 } 2569 #endif 2570 2571 // Load address of array of frame pcs into r2 (address*) 2572 __ ldr(r2, Address(r4, 2573 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); 2574 2575 // Load address of array of frame sizes into r5 (intptr_t*) 2576 __ ldr(r5, Address(r4, 2577 Deoptimization::UnrollBlock:: 2578 frame_sizes_offset_in_bytes())); 2579 2580 // Counter 2581 __ ldr(r3, Address(r4, 2582 Deoptimization::UnrollBlock:: 2583 number_of_frames_offset_in_bytes())); // (int) 2584 2585 // Now adjust the caller's stack to make up for the extra locals but 2586 // record the original sp so that we can save it in the skeletal 2587 // interpreter frame and the stack walking of interpreter_sender 2588 // will get the unextended sp value and not the "real" sp value. 2589 2590 const Register sender_sp = r8; 2591 2592 __ mov(sender_sp, sp); 2593 __ ldr(r1, Address(r4, 2594 Deoptimization::UnrollBlock:: 2595 caller_adjustment_offset_in_bytes())); // (int) 2596 __ sub(sp, sp, r1); 2597 2598 __ mov(rscratch1, 0); 2599 // Push interpreter frames in a loop 2600 Label loop; 2601 __ bind(loop); 2602 __ ldr(r1, Address(r5, 0)); // Load frame size 2603 __ sub(r1, r1, 2 * wordSize); // We'll push pc and rfp by hand 2604 __ ldr(lr, Address(r2, 0)); // Save return address 2605 __ enter(); // and old rfp & set new rfp 2606 __ sub(sp, sp, r1); // Prolog 2607 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable 2608 // This value is corrected by layout_activation_impl 2609 __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); //zero it 2610 __ mov(sender_sp, sp); // Pass sender_sp to next frame 2611 __ add(r5, r5, wordSize); // Bump array pointer (sizes) 2612 __ add(r2, r2, wordSize); // Bump array pointer (pcs) 2613 __ subs(r3, r3, 1); // Decrement counter 2614 __ b(loop, Assembler::GT); 2615 __ ldr(lr, Address(r2, 0)); // save final return address 2616 // Re-push self-frame 2617 __ enter(); // & old rfp & set new rfp 2618 2619 // Use rfp because the frames look interpreted now 2620 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. 2621 // Don't need the precise return PC here, just precise enough to point into this code blob. 2622 address the_pc = __ pc(); 2623 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); 2624 2625 // Call C code. Need thread but NOT official VM entry 2626 // crud. We cannot block on this call, no GC can happen. Call should 2627 // restore return values to their stack-slots with the new SP. 2628 // Thread is in rdi already. 2629 // 2630 // BasicType unpack_frames(JavaThread* thread, int exec_mode); 2631 // 2632 // n.b. 2 gp args, 0 fp args, integral return type 2633 2634 // sp should already be aligned 2635 __ mov(c_rarg0, rthread); 2636 __ mov(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap); 2637 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); 2638 __ bl(rscratch1); 2639 2640 // Set an oopmap for the call site 2641 // Use the same PC we used for the last java frame 2642 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); 2643 2644 // Clear fp AND pc 2645 __ reset_last_Java_frame(true, true); 2646 2647 // Pop self-frame. 2648 __ leave(); // Epilog 2649 2650 // Jump to interpreter 2651 __ b(lr); 2652 2653 // Make sure all code is generated 2654 masm->flush(); 2655 2656 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, 2657 SimpleRuntimeFrame::framesize >> 1); 2658 2659 } */ 2660 #endif // COMPILER2 2661 2662 2663 //------------------------------generate_handler_blob------ 2664 // 2665 // Generate a special Compile2Runtime blob that saves all registers, 2666 // and setup oopmap. 2667 // 2668 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { 2669 ResourceMark rm; 2670 OopMapSet *oop_maps = new OopMapSet(); 2671 OopMap* map; 2672 2673 // Allocate space for the code. Setup code generation tools. 2674 CodeBuffer buffer("handler_blob", 2048, 1024); 2675 MacroAssembler* masm = new MacroAssembler(&buffer); 2676 2677 address start = __ pc(); 2678 address call_pc = NULL; 2679 int frame_size_in_words; 2680 bool cause_return = (poll_type == POLL_AT_RETURN); 2681 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP); 2682 2683 // Save registers, fpu state, and flags 2684 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2685 2686 // The following is basically a call_VM. However, we need the precise 2687 // address of the call in order to generate an oopmap. Hence, we do all the 2688 // work outselves. 2689 2690 Label retaddr; 2691 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1); 2692 2693 // The return address must always be correct so that frame constructor never 2694 // sees an invalid pc. 2695 2696 if (!cause_return) { 2697 // overwrite the return address pushed by save_live_registers 2698 __ ldr(lr, Address(rthread, JavaThread::saved_exception_pc_offset())); 2699 __ str(lr, Address(rfp)); 2700 } 2701 2702 // Do the call 2703 __ mov(c_rarg0, rthread); 2704 __ lea(rscratch1, RuntimeAddress(call_ptr)); 2705 __ bl(rscratch1); 2706 __ bind(retaddr); 2707 2708 // Set an oopmap for the call site. This oopmap will map all 2709 // oop-registers and debug-info registers as callee-saved. This 2710 // will allow deoptimization at this safepoint to find all possible 2711 // debug-info recordings, as well as let GC find all oops. 2712 2713 oop_maps->add_gc_map( __ pc() - start, map); 2714 2715 Label noException; 2716 2717 __ reset_last_Java_frame(false, true); 2718 2719 __ maybe_isb(); 2720 __ membar(Assembler::LoadLoad | Assembler::LoadStore); 2721 2722 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 2723 __ cbz(rscratch1, noException); 2724 2725 // Exception pending 2726 2727 RegisterSaver::restore_live_registers(masm); 2728 2729 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2730 2731 // No exception case 2732 __ bind(noException); 2733 2734 // Normal exit, restore registers and exit. 2735 RegisterSaver::restore_live_registers(masm); 2736 2737 __ b(lr); 2738 2739 // Make sure all code is generated 2740 masm->flush(); 2741 2742 // Fill-out other meta info 2743 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); 2744 } 2745 2746 // 2747 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss 2748 // 2749 // Generate a stub that calls into vm to find out the proper destination 2750 // of a java call. All the argument registers are live at this point 2751 // but since this is generic code we don't know what they are and the caller 2752 // must do any gc of the args. 2753 // 2754 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { 2755 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); 2756 2757 // allocate space for the code 2758 ResourceMark rm; 2759 2760 //CodeBuffer buffer(name, 1000, 512); 2761 CodeBuffer buffer(name, 2048, 512 ); // changed as error later 2762 MacroAssembler* masm = new MacroAssembler(&buffer); 2763 2764 int frame_size_in_words; 2765 2766 OopMapSet *oop_maps = new OopMapSet(); 2767 OopMap* map = NULL; 2768 2769 int start = __ offset(); 2770 2771 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 2772 2773 int frame_complete = __ offset(); 2774 2775 { 2776 Label retaddr; 2777 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1); 2778 2779 __ mov(c_rarg0, rthread); 2780 __ lea(rscratch1, RuntimeAddress(destination)); 2781 2782 __ bl(rscratch1); 2783 __ bind(retaddr); 2784 } 2785 2786 // Set an oopmap for the call site. 2787 // We need this not only for callee-saved registers, but also for volatile 2788 // registers that the compiler might be keeping live across a safepoint. 2789 2790 oop_maps->add_gc_map( __ offset() - start, map); 2791 2792 __ maybe_isb(); 2793 2794 // r0 contains the address we are going to jump to assuming no exception got installed 2795 2796 // clear last_Java_sp 2797 // TODO x86 have different action: reset_last_Java_frame(thread, true(fp), false(pc)); 2798 // TODO below is false(fp), true(pc) 2799 __ reset_last_Java_frame(false, true); 2800 // check for pending exceptions 2801 Label pending; 2802 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 2803 __ cbnz(rscratch1, pending); 2804 2805 // get the returned Method* 2806 __ get_vm_result_2(rmethod, rthread); 2807 __ str(rmethod, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::rmethod_off))); 2808 2809 // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch 2810 __ str(r0, Address(sp, RegisterSaver::offset_in_bytes(RegisterSaver::rscratch1_off))); 2811 RegisterSaver::restore_live_registers(masm); 2812 2813 // We are back the the original state on entry and ready to go. 2814 2815 __ b(rscratch1); 2816 2817 // Pending exception after the safepoint 2818 2819 __ bind(pending); 2820 2821 RegisterSaver::restore_live_registers(masm); 2822 2823 // exception pending => remove activation and forward to exception handler 2824 __ mov(rscratch1, 0); 2825 __ str(rscratch1, Address(rthread, JavaThread::vm_result_offset())); 2826 2827 __ ldr(r0, Address(rthread, Thread::pending_exception_offset())); 2828 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2829 2830 // ------------- 2831 // make sure all code is generated 2832 masm->flush(); 2833 2834 // return the blob 2835 // frame_size_words or bytes?? 2836 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); 2837 } 2838 2839 2840 #ifdef COMPILER2 2841 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame 2842 // 2843 //------------------------------generate_exception_blob--------------------------- 2844 // creates exception blob at the end 2845 // Using exception blob, this code is jumped from a compiled method. 2846 // (see emit_exception_handler in x86_64.ad file) 2847 // 2848 // Given an exception pc at a call we call into the runtime for the 2849 // handler in this method. This handler might merely restore state 2850 // (i.e. callee save registers) unwind the frame and jump to the 2851 // exception handler for the nmethod if there is no Java level handler 2852 // for the nmethod. 2853 // 2854 // This code is entered with a jmp. 2855 // 2856 // Arguments: 2857 // r0: exception oop 2858 // r3: exception pc 2859 // 2860 // Results: 2861 // r0: exception oop 2862 // r3: exception pc in caller or ??? 2863 // destination: exception handler of caller 2864 // 2865 // Note: the exception pc MUST be at a call (precise debug information) 2866 // Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved. 2867 // 2868 2869 void OptoRuntime::generate_exception_blob() { 2870 assert(!OptoRuntime::is_callee_saved_register(R3_num), ""); 2871 assert(!OptoRuntime::is_callee_saved_register(R0_num), ""); 2872 assert(!OptoRuntime::is_callee_saved_register(R2_num), ""); 2873 2874 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); 2875 2876 // Allocate space for the code 2877 ResourceMark rm; 2878 // Setup code generation tools 2879 CodeBuffer buffer("exception_blob", 2048, 1024); 2880 MacroAssembler* masm = new MacroAssembler(&buffer); 2881 2882 __ stop("FIXME generate_exception_blob"); 2883 // TODO check various assumptions made here 2884 // 2885 // make sure we do so before running this 2886 2887 address start = __ pc(); 2888 2889 // push rfp and retaddr by hand 2890 // Exception pc is 'return address' for stack walker 2891 __ strd(rfp, lr, Address(__ pre(sp, -2 * wordSize))); 2892 // there are no callee save registers and we don't expect an 2893 // arg reg save area 2894 #ifndef PRODUCT 2895 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); 2896 #endif 2897 // Store exception in Thread object. We cannot pass any arguments to the 2898 // handle_exception call, since we do not want to make any assumption 2899 // about the size of the frame where the exception happened in. 2900 __ str(r0, Address(rthread, JavaThread::exception_oop_offset())); 2901 __ str(r3, Address(rthread, JavaThread::exception_pc_offset())); 2902 2903 // This call does all the hard work. It checks if an exception handler 2904 // exists in the method. 2905 // If so, it returns the handler address. 2906 // If not, it prepares for stack-unwinding, restoring the callee-save 2907 // registers of the frame being removed. 2908 // 2909 // address OptoRuntime::handle_exception_C(JavaThread* thread) 2910 // 2911 // n.b. 1 gp arg, 0 fp args, integral return type 2912 2913 // the stack should always be aligned 2914 address the_pc = __ pc(); 2915 __ set_last_Java_frame(sp, noreg, the_pc, rscratch1); 2916 __ mov(c_rarg0, rthread); 2917 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); 2918 __ bl(rscratch1); 2919 __ maybe_isb(); 2920 2921 // Set an oopmap for the call site. This oopmap will only be used if we 2922 // are unwinding the stack. Hence, all locations will be dead. 2923 // Callee-saved registers will be the same as the frame above (i.e., 2924 // handle_exception_stub), since they were restored when we got the 2925 // exception. 2926 2927 OopMapSet* oop_maps = new OopMapSet(); 2928 2929 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); 2930 2931 __ reset_last_Java_frame(false, true); 2932 2933 // Restore callee-saved registers 2934 2935 // rfp is an implicitly saved callee saved register (i.e. the calling 2936 // convention will save restore it in prolog/epilog) Other than that 2937 // there are no callee save registers now that adapter frames are gone. 2938 // and we dont' expect an arg reg save area 2939 __ ldrd(rfp, r3, Address(__ post(sp, 2 * wordSize))); 2940 2941 // r0: exception handler 2942 2943 // We have a handler in r0 (could be deopt blob). 2944 __ mov(r8, r0); 2945 2946 // Get the exception oop 2947 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 2948 // Get the exception pc in case we are deoptimized 2949 __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset())); 2950 __ mov(rscratch1, 0); 2951 #ifdef ASSERT 2952 __ str(rscratch1, Address(rthread, JavaThread::exception_handler_pc_offset())); 2953 __ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset())); 2954 #endif 2955 // Clear the exception oop so GC no longer processes it as a root. 2956 __ str(rscratch1, Address(rthread, JavaThread::exception_oop_offset())); 2957 2958 // r0: exception oop 2959 // r8: exception handler 2960 // r4: exception pc 2961 // Jump to handler 2962 2963 __ b(r8); 2964 2965 // Make sure all code is generated 2966 masm->flush(); 2967 2968 // Set exception blob 2969 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); 2970 } 2971 #endif // COMPILER2