/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception, a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use align_up because it doesn't produce a compile time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };


 public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result register needs to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
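// Illustrative note (not from the original sources): the enum above relies on
// the classic round-up-to-8 idiom. For any non-negative x,
//
//   (x + 7) & ~7
//
// yields the smallest multiple of 8 that is >= x, e.g. 52 -> 56 and 56 -> 56.
// align_up() computes the same value, but (as the comment in the enum notes)
// cannot be used where an integral constant expression is required, hence the
// manual masking for start_of_extra_save_area.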
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = align_up(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  __ save(SP, -frame_size, SP);


  int debug_offset = 0;
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}
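
// Illustrative note (not from the original sources): OopMap locations are in
// 4-byte stack slots, while the save-area offsets above are byte offsets,
// hence the >>2 conversions. A worked example, assuming a double saved at
// byte offset 200 from SP:
//
//   VMRegImpl::stack2reg(200 >> 2)                   // slot 50: first half
//   VMRegImpl::stack2reg((200 + sizeof(float)) >> 2) // slot 51: second half
//
// so one 8-byte double occupies two consecutive OopMap slots, matching the
// two 4-byte float halves recorded in the loop above.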

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

}

// Is vector's size (in bytes) bigger than a size saved by default?
// 8 bytes FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 40;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ set((intptr_t)destination, G3_scratch);
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot past
// the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to RegisterImpl::number_of_registers)
// are the 64-bit integer registers. Values 64-95 are the (32-bit only) float
// registers. Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers. For example, there
// is an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (i.e.
// regs[].first() == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()),
// nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
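
// Illustrative examples (not from the original sources) of the pair encoding
// described above, assuming the 64-bit build:
//
//   T_INT in O0:   regs[i].first() == O0's VMReg, second() == VMRegImpl::Bad()
//   T_LONG in O1:  regs[i].first() == O1's VMReg, second() == first()->next()
//   T_VOID half:   regs[i].first() == second() == VMRegImpl::Bad()
//
// so a 32-bit value occupies one half of the pair, a 64-bit value both, and
// the trailing T_VOID of a long or double occupies neither.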

// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (align_up(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = align_up(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();  // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
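
// Illustrative trace (not from the original sources) of the convention above
// for an outgoing call with Java signature (int, long, float, double):
//
//   sig_bt = { T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID }
//
//   T_INT    -> O0          (set1, int_reg 0 -> 1)
//   T_LONG   -> O1 pair     (set2, int_reg 1 -> 2)
//   T_VOID   -> Bad         (placeholder for the long's other half)
//   T_FLOAT  -> F0          (set1, flt_reg 0 -> 1)
//   T_DOUBLE -> F2:F3 pair  (set2, flt_reg aligned 1 -> 2, then -> 4)
//   T_VOID   -> Bad
//
// No stack slots are consumed, so the function returns 0.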

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);  // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);  // VM needs target method
  __ mov(I7, O1);         // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);

  __ restore();  // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
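
// Illustrative note (not from the original sources): SPARC load/store
// instructions take a signed 13-bit immediate displacement, i.e. the range
// [-4096, 4095]. ensure_simm13_or_reg() keeps a small offset as an immediate
// and otherwise materializes it into Rdisp, so both cases below yield a
// valid operand:
//
//   arg_slot(80)   -> constant 80 used directly as [base + 80]
//   arg_slot(8000) -> 8000 loaded into Rdisp, then addressed as [base + Rdisp]
//
// This is why set_Rdisp() must name a scratch register before the argument
// store/load loops run.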


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}
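
// Illustrative note (not from the original sources), assuming the 64-bit
// build's 8-byte Interpreter::stackElementSize: a long or double addressed
// with st_off X is stored via next_arg_slot, i.e. at X - 8, while the element
// at X itself stays unused:
//
//   store_c2i_int(r, base, X)   ->  st  [base + X]      (one slot)
//   store_c2i_long(r, base, X)  ->  stx [base + X - 8]  (data in 1 of 2 slots)
//
// matching the "2 slots, data in only 1" V9 layout described above.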

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = align_up(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {  // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();  // as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}
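
// Illustrative sizing example (not from the original sources; the varargs
// figure is a placeholder, not the real frame constant): with 8-byte stack
// elements and, say, a 48-byte varargs area, three Java arguments need
//
//   arg_size   = 3 * 8 = 24 bytes
//   extraspace = align_up(24 + 48, 16) = 80 bytes
//
// of additional interpreter argument space, carved out by the single
// sub(SP, extraspace, SP) above.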

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  // temp_reg now holds code_end; cmp_and_brx_short does its own compare.
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack, // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :   doubles    :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }
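
  // Illustrative trace (not from the original sources) of the resize above,
  // assuming comp_args_on_stack == 5 on a 64-bit word:
  //
  //   5 slots * 4 bytes    = 20 bytes
  //   align_up(20, 8) >> 3 = 3 words
  //   align_up(3, 2)       = 4 words
  //
  // so SP is dropped by 32 bytes, preserving the 16-byte stack alignment.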

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {  // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg(); // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_metadata_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So,
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
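
// Illustrative trace (not from the original sources) of int_stk_helper, with
// SPARC_ARGS_IN_REGS_NUM == 6 and the frame constants left symbolic:
//
//   i = 0..5 -> O0..O5 as VMRegs (register arguments)
//   i = 6    -> stack2reg((0 + frame::memory_parameter_word_sp_offset)
//                           * VMRegImpl::slots_per_word
//                         - SharedRuntime::out_preserve_stack_slots())
//
// i.e. the seventh argument lands in the first memory-parameter word, with
// the bias removed so that callers who add out_preserve_stack_slots back in
// (as reg2offset does) end up at the right byte offset.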


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on sparc");

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64
  // The LP64 convention is the V9 convention which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                 // Count of actual args, not HALVES
  VMRegPair param_array_reg; // location of the argument in the parameter array
  for (int i = 0; i < total_args_passed; i++, j++) {
    param_array_reg.set_bad();
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      // fall-through
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
      // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
      //
      // "When a callee prototype exists, and does not indicate variable arguments,
      // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
      // will be promoted to floating-point registers"
      //
      // By "promoted" it means that the argument is located in two places, an unused
      // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
      // float register. In most cases, there are 6 or fewer arguments of any type,
      // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
      // serve as shadow slots. Per the spec floating point registers %d6 to %d16
      // require slots beyond that (up to %sp+BIAS+248).
      //
      {
        // V9ism: floats go in ODD registers and stack slots
        int float_index = 1 + (j << 1);
        param_array_reg.set1(VMRegImpl::stack2reg(float_index));
        if (j < 16) {
          regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
        } else {
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_DOUBLE:
      {
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // V9ism: doubles go in EVEN/ODD regs and stack slots
        int double_index = (j << 1);
        param_array_reg.set2(VMRegImpl::stack2reg(double_index));
        if (j < 16) {
          regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      j--;
      break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    // Keep track of the deepest parameter array slot.
    if (!param_array_reg.first()->is_valid()) {
      param_array_reg = regs[i];
    }
    if (param_array_reg.first()->is_stack()) {
      int off = param_array_reg.first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (param_array_reg.second()->is_stack()) {
      int off = param_array_reg.second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
  return align_up(max_stack_slots + 1, 2);

}
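
// Illustrative trace (not from the original sources) of the V9 native
// convention above for a JNI-style signature (JNIEnv*, jobject, jint, jdouble):
//
//   sig_bt = { T_ADDRESS, T_OBJECT, T_INT, T_DOUBLE, T_VOID }
//
//   j = 0, T_ADDRESS -> O0 (int_stk_helper)
//   j = 1, T_OBJECT  -> O1
//   j = 2, T_INT     -> O2
//   j = 3, T_DOUBLE  -> F6:F7 (double_index = 3 << 1 = 6), shadowed by
//                       parameter-array slots 6 and 7
//
// The T_VOID half is skipped (j is not advanced), and the returned slot
// count still reserves the full 6-word parameter array.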


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward a pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
// is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();  // Pop this frame off.
  __ bind(L);
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64 bit we will store integer like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), L5);
    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}
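
// Illustrative note (not from the original sources): move32_64 uses signx
// because the V9 ABI expects 32-bit integers widened to 64 bits. For example,
// a jint of -1 (0xFFFFFFFF) arriving in a register is sign-extended to
// 0xFFFFFFFFFFFFFFFF before being stored into the 8-byte outgoing slot or
// moved into the destination register, so the native callee sees a clean
// 64-bit value.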


// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
    __ movr( Assembler::rc_z, L4, G0, rHandle );
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
    __ movr( Assembler::rc_z, rOop, G0, rHandle );

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}
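
// Illustrative note (not from the original sources): JNI receives oop
// arguments as handles (pointers to a stack slot holding the oop), and a
// NULL oop must arrive as a NULL handle, not as a pointer to a NULL slot.
// That is what the conditional move above implements:
//
//   movr(rc_z, rOop, G0, rHandle)   // if (rOop == 0) rHandle = 0
//
// leaving rHandle either pointing at the spilled oop or equal to NULL.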

// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means hi address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address not msb for stack slots
      // and lo is the "real" register for registers
      // src is

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // So dst now has the low order correct; position the
      // msw half
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}
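
// Illustrative note (not from the original sources): the register splits above
// rebuild a 64-bit value from 32-bit halves with shift-and-or. Given the msw
// in one register and the lsw in the low half of another:
//
//   sllx(msw, 32, L5);   // L5 = msw << 32
//   or3(L5, d, d);       // d  = (msw << 32) | lsw
//
// while srax(src, 32, d) goes the other way, extracting the msw from a
// 64-bit register.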

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  // 1: a single physical register
  // 2: two physical registers (v8)
  // 3: a physical reg [lo] and a stack slot [hi] (v8)
  // 4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?)

  // in a register [lo] and a stack slot [hi]
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      // ought to be a way to do this where if alignment is ok we use ldd/std when possible
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
        if (reg2offset(dst.first()) & 0x7) {
          // No, do as pairs
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        // (st, not stx: the halves are 32 bits wide and an 8-byte store at
        // FP-4 would overlap the first half and spill past FP)
        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}
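
// Illustrative note (not from the original sources) on the alignment test
// above: SPARC's 8-byte stf %d requires an 8-byte-aligned address, so
//
//   reg2offset(dst.first()) & 0x7 == 0  ->  one 8-byte stf D
//   reg2offset(dst.first()) & 0x7 != 0  ->  two 4-byte stf S, one per half
//
// e.g. a destination at byte offset 92 (92 & 7 == 4) must be stored as two
// singles, while offset 96 can take the double store directly.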
        if (reg2offset(dst.first()) & 0x7) {
          // Not aligned: store as a pair of singles
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        // Each register holds a 32-bit half, so use 32-bit stores; an stx at
        // FP-4 would be a misaligned (and overlapping) doubleword store.
        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these could overlap, but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
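  //
  // A minimal sketch of the intended pairing (mirroring the use in
  // check_needs_gc_for_critical_native below): call once with a map to save,
  // then again with NULL to reload the same arguments:
  //
  //   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);
  //   save_or_restore_arguments(masm, stack_slots, total_in_args,
  //                             arg_save_area, map,  in_regs, in_sig_bt); // save
  //   ... // code that may clobber the argument registers
  //   save_or_restore_arguments(masm, stack_slots, total_in_args,
  //                             arg_save_area, NULL, in_regs, in_sig_bt); // restore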
1483 if (map != NULL) { 1484 // Fill in the map 1485 for (int i = 0; i < total_in_args; i++) { 1486 if (in_sig_bt[i] == T_ARRAY) { 1487 if (in_regs[i].first()->is_stack()) { 1488 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1489 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots)); 1490 } else if (in_regs[i].first()->is_Register()) { 1491 map->set_oop(in_regs[i].first()); 1492 } else { 1493 ShouldNotReachHere(); 1494 } 1495 } 1496 } 1497 } 1498 1499 // Save or restore double word values 1500 int handle_index = 0; 1501 for (int i = 0; i < total_in_args; i++) { 1502 int slot = handle_index + arg_save_area; 1503 int offset = slot * VMRegImpl::stack_slot_size; 1504 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) { 1505 const Register reg = in_regs[i].first()->as_Register(); 1506 if (reg->is_global()) { 1507 handle_index += 2; 1508 assert(handle_index <= stack_slots, "overflow"); 1509 if (map != NULL) { 1510 __ stx(reg, SP, offset + STACK_BIAS); 1511 } else { 1512 __ ldx(SP, offset + STACK_BIAS, reg); 1513 } 1514 } 1515 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) { 1516 handle_index += 2; 1517 assert(handle_index <= stack_slots, "overflow"); 1518 if (map != NULL) { 1519 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS); 1520 } else { 1521 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister()); 1522 } 1523 } 1524 } 1525 // Save floats 1526 for (int i = 0; i < total_in_args; i++) { 1527 int slot = handle_index + arg_save_area; 1528 int offset = slot * VMRegImpl::stack_slot_size; 1529 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) { 1530 handle_index++; 1531 assert(handle_index <= stack_slots, "overflow"); 1532 if (map != NULL) { 1533 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS); 1534 } else { 1535 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister()); 1536 } 1537 } 1538 } 1539 1540 } 1541 1542 1543 // Check GCLocker::needs_gc and enter the runtime if it's true. This 1544 // keeps a new JNI critical region from starting until a GC has been 1545 // forced. Save down any oops in registers and describe them in an 1546 // OopMap. 
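// (GCLocker::needs_gc() becomes true when a GC was requested while some thread
// held the JNI critical lock; blocking in the runtime here presumably lets
// that pending GC complete before this wrapper enters a new critical region.)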
1547 static void check_needs_gc_for_critical_native(MacroAssembler* masm, 1548 const int stack_slots, 1549 const int total_in_args, 1550 const int arg_save_area, 1551 OopMapSet* oop_maps, 1552 VMRegPair* in_regs, 1553 BasicType* in_sig_bt) { 1554 __ block_comment("check GCLocker::needs_gc"); 1555 Label cont; 1556 AddressLiteral sync_state(GCLocker::needs_gc_address()); 1557 __ load_bool_contents(sync_state, G3_scratch); 1558 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont); 1559 __ delayed()->nop(); 1560 1561 // Save down any values that are live in registers and call into the 1562 // runtime to halt for a GC 1563 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1564 save_or_restore_arguments(masm, stack_slots, total_in_args, 1565 arg_save_area, map, in_regs, in_sig_bt); 1566 1567 __ mov(G2_thread, L7_thread_cache); 1568 1569 __ set_last_Java_frame(SP, noreg); 1570 1571 __ block_comment("block_for_jni_critical"); 1572 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type); 1573 __ delayed()->mov(L7_thread_cache, O0); 1574 oop_maps->add_gc_map( __ offset(), map); 1575 1576 __ restore_thread(L7_thread_cache); // restore G2_thread 1577 __ reset_last_Java_frame(); 1578 1579 // Reload all the register arguments 1580 save_or_restore_arguments(masm, stack_slots, total_in_args, 1581 arg_save_area, NULL, in_regs, in_sig_bt); 1582 1583 __ bind(cont); 1584 #ifdef ASSERT 1585 if (StressCriticalJNINatives) { 1586 // Stress register saving 1587 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1588 save_or_restore_arguments(masm, stack_slots, total_in_args, 1589 arg_save_area, map, in_regs, in_sig_bt); 1590 // Destroy argument registers 1591 for (int i = 0; i < total_in_args; i++) { 1592 if (in_regs[i].first()->is_Register()) { 1593 const Register reg = in_regs[i].first()->as_Register(); 1594 if (reg->is_global()) { 1595 __ mov(G0, reg); 1596 } 1597 } else if (in_regs[i].first()->is_FloatRegister()) { 1598 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister()); 1599 } 1600 } 1601 1602 save_or_restore_arguments(masm, stack_slots, total_in_args, 1603 arg_save_area, NULL, in_regs, in_sig_bt); 1604 } 1605 #endif 1606 } 1607 1608 // Unpack an array argument into a pointer to the body and the length 1609 // if the array is non-null, otherwise pass 0 for both. 
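// For example, a critical native for "static int sum(int[] a)" would see the
// array roughly as a (jint length, jint* body) pair, i.e. a C entry point of
// the form (a sketch; the name and signature are illustrative, not from this
// file):
//   jint JavaCritical_Foo_sum(jint a_len, jint* a_body);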
1610 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { 1611 // Pass the length, ptr pair 1612 Label is_null, done; 1613 if (reg.first()->is_stack()) { 1614 VMRegPair tmp = reg64_to_VMRegPair(L2); 1615 // Load the arg up from the stack 1616 move_ptr(masm, reg, tmp); 1617 reg = tmp; 1618 } 1619 __ cmp(reg.first()->as_Register(), G0); 1620 __ brx(Assembler::equal, false, Assembler::pt, is_null); 1621 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4); 1622 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg); 1623 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4); 1624 move32_64(masm, reg64_to_VMRegPair(L4), length_arg); 1625 __ ba_short(done); 1626 __ bind(is_null); 1627 // Pass zeros 1628 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg); 1629 move32_64(masm, reg64_to_VMRegPair(G0), length_arg); 1630 __ bind(done); 1631 } 1632 1633 static void verify_oop_args(MacroAssembler* masm, 1634 const methodHandle& method, 1635 const BasicType* sig_bt, 1636 const VMRegPair* regs) { 1637 Register temp_reg = G5_method; // not part of any compiled calling seq 1638 if (VerifyOops) { 1639 for (int i = 0; i < method->size_of_parameters(); i++) { 1640 if (sig_bt[i] == T_OBJECT || 1641 sig_bt[i] == T_ARRAY) { 1642 VMReg r = regs[i].first(); 1643 assert(r->is_valid(), "bad oop arg"); 1644 if (r->is_stack()) { 1645 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; 1646 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg); 1647 __ ld_ptr(SP, ld_off, temp_reg); 1648 __ verify_oop(temp_reg); 1649 } else { 1650 __ verify_oop(r->as_Register()); 1651 } 1652 } 1653 } 1654 } 1655 } 1656 1657 static void gen_special_dispatch(MacroAssembler* masm, 1658 const methodHandle& method, 1659 const BasicType* sig_bt, 1660 const VMRegPair* regs) { 1661 verify_oop_args(masm, method, sig_bt, regs); 1662 vmIntrinsics::ID iid = method->intrinsic_id(); 1663 1664 // Now write the args into the outgoing interpreter space 1665 bool has_receiver = false; 1666 Register receiver_reg = noreg; 1667 int member_arg_pos = -1; 1668 Register member_reg = noreg; 1669 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1670 if (ref_kind != 0) { 1671 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1672 member_reg = G5_method; // known to be free at this point 1673 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1674 } else if (iid == vmIntrinsics::_invokeBasic) { 1675 has_receiver = true; 1676 } else { 1677 fatal("unexpected intrinsic id %d", iid); 1678 } 1679 1680 if (member_reg != noreg) { 1681 // Load the member_arg into register, if necessary. 1682 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1683 VMReg r = regs[member_arg_pos].first(); 1684 if (r->is_stack()) { 1685 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; 1686 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); 1687 __ ld_ptr(SP, ld_off, member_reg); 1688 } else { 1689 // no data motion is needed 1690 member_reg = r->as_Register(); 1691 } 1692 } 1693 1694 if (has_receiver) { 1695 // Make sure the receiver is loaded into a register. 
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = G3_scratch; // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//     if (GCLocker::needs_gc())
//       SharedRuntime::block_for_jni_critical();
//     transition to thread_in_native
//     unpack array arguments and call native entry point
//     check for safepoint in progress
//     check if any thread suspend flags are set
//     call into the JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
//     transition back to thread_in_Java
//     return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing: make an IC check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
    // Object.hashCode, System.identityHashCode can pull the hashCode from the
    // header word instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do this
    // optimization at the call site without a lot of work.
    Label slowCase;
    Label done;
    Register obj_reg = O0;
    Register result = O0;
    Register header = G3_scratch;
    Register hash = G3_scratch; // overwrite header value with hash value
    Register mask = G1;         // to get hash field from header

    // Unlike Object.hashCode, System.identityHashCode is a static method and
    // gets the object as an argument instead of the receiver.
    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
      assert(method->is_static(), "method should be static");
      // return 0 for null reference input
      __ br_null(obj_reg, false, Assembler::pn, done);
      __ delayed()->mov(obj_reg, hash);
    }

    // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
1834 // We depend on hash_mask being at most 32 bits and avoid the use of 1835 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit 1836 // vm: see markWord.hpp. 1837 __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header); 1838 __ sethi(markWord::hash_mask, mask); 1839 __ btst(markWord::unlocked_value, header); 1840 __ br(Assembler::zero, false, Assembler::pn, slowCase); 1841 if (UseBiasedLocking) { 1842 // Check if biased and fall through to runtime if so 1843 __ delayed()->nop(); 1844 __ btst(markWord::biased_lock_bit_in_place, header); 1845 __ br(Assembler::notZero, false, Assembler::pn, slowCase); 1846 } 1847 __ delayed()->or3(mask, markWord::hash_mask & 0x3ff, mask); 1848 1849 // Check for a valid (non-zero) hash code and get its value. 1850 __ srlx(header, markWord::hash_shift, hash); 1851 __ andcc(hash, mask, hash); 1852 __ br(Assembler::equal, false, Assembler::pn, slowCase); 1853 __ delayed()->nop(); 1854 1855 // leaf return. 1856 __ bind(done); 1857 __ retl(); 1858 __ delayed()->mov(hash, result); 1859 __ bind(slowCase); 1860 } 1861 #endif // COMPILER1 1862 1863 1864 // We have received a description of where all the java arg are located 1865 // on entry to the wrapper. We need to convert these args to where 1866 // the jni function will expect them. To figure out where they go 1867 // we convert the java signature to a C signature by inserting 1868 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1869 1870 const int total_in_args = method->size_of_parameters(); 1871 int total_c_args = total_in_args; 1872 int total_save_slots = 6 * VMRegImpl::slots_per_word; 1873 if (!is_critical_native) { 1874 total_c_args += 1; 1875 if (method->is_static()) { 1876 total_c_args++; 1877 } 1878 } else { 1879 for (int i = 0; i < total_in_args; i++) { 1880 if (in_sig_bt[i] == T_ARRAY) { 1881 // These have to be saved and restored across the safepoint 1882 total_c_args++; 1883 } 1884 } 1885 } 1886 1887 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1888 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1889 BasicType* in_elem_bt = NULL; 1890 1891 int argc = 0; 1892 if (!is_critical_native) { 1893 out_sig_bt[argc++] = T_ADDRESS; 1894 if (method->is_static()) { 1895 out_sig_bt[argc++] = T_OBJECT; 1896 } 1897 1898 for (int i = 0; i < total_in_args ; i++ ) { 1899 out_sig_bt[argc++] = in_sig_bt[i]; 1900 } 1901 } else { 1902 Thread* THREAD = Thread::current(); 1903 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args); 1904 SignatureStream ss(method->signature()); 1905 for (int i = 0; i < total_in_args ; i++ ) { 1906 if (in_sig_bt[i] == T_ARRAY) { 1907 // Arrays are passed as int, elem* pair 1908 out_sig_bt[argc++] = T_INT; 1909 out_sig_bt[argc++] = T_ADDRESS; 1910 Symbol* atype = ss.as_symbol(); 1911 const char* at = atype->as_C_string(); 1912 if (strlen(at) == 2) { 1913 assert(at[0] == '[', "must be"); 1914 switch (at[1]) { 1915 case 'B': in_elem_bt[i] = T_BYTE; break; 1916 case 'C': in_elem_bt[i] = T_CHAR; break; 1917 case 'D': in_elem_bt[i] = T_DOUBLE; break; 1918 case 'F': in_elem_bt[i] = T_FLOAT; break; 1919 case 'I': in_elem_bt[i] = T_INT; break; 1920 case 'J': in_elem_bt[i] = T_LONG; break; 1921 case 'S': in_elem_bt[i] = T_SHORT; break; 1922 case 'Z': in_elem_bt[i] = T_BOOLEAN; break; 1923 default: ShouldNotReachHere(); 1924 } 1925 } 1926 } else { 1927 out_sig_bt[argc++] = in_sig_bt[i]; 1928 in_elem_bt[i] = T_VOID; 1929 } 1930 if (in_sig_bt[i] != T_VOID) { 1931 assert(in_sig_bt[i] == ss.type(), "must match"); 
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots, but providing space
  // for storing the 1st six register arguments). It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT: assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default: ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT: single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default: ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }

  // Compute framesize for the wrapper. We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number, not
  // the outbound number.)
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
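  // In outline (a slot is 4 bytes; with 64-bit words, slots_per_word == 2):
  //   stack_slots = out_preserve + out_arg_slots      // ABI area + outgoing C args
  //               + total_save_slots                  // inbound oop-handle / arg-save area
  //               + 2 if static (klass) + 2 if synchronized (lock box)
  //               + 2 (return-value / gpr->fpr temp),
  //   then rounded up so the final frame stays 16-byte aligned.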
1986 1987 // First count the abi requirement plus all of the outgoing args 1988 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1989 1990 // Now the space for the inbound oop handle area 1991 1992 int oop_handle_offset = align_up(stack_slots, 2); 1993 stack_slots += total_save_slots; 1994 1995 // Now any space we need for handlizing a klass if static method 1996 1997 int klass_slot_offset = 0; 1998 int klass_offset = -1; 1999 int lock_slot_offset = 0; 2000 bool is_static = false; 2001 2002 if (method->is_static()) { 2003 klass_slot_offset = stack_slots; 2004 stack_slots += VMRegImpl::slots_per_word; 2005 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2006 is_static = true; 2007 } 2008 2009 // Plus a lock if needed 2010 2011 if (method->is_synchronized()) { 2012 lock_slot_offset = stack_slots; 2013 stack_slots += VMRegImpl::slots_per_word; 2014 } 2015 2016 // Now a place to save return value or as a temporary for any gpr -> fpr moves 2017 stack_slots += 2; 2018 2019 // Ok The space we have allocated will look like: 2020 // 2021 // 2022 // FP-> | | 2023 // |---------------------| 2024 // | 2 slots for moves | 2025 // |---------------------| 2026 // | lock box (if sync) | 2027 // |---------------------| <- lock_slot_offset 2028 // | klass (if static) | 2029 // |---------------------| <- klass_slot_offset 2030 // | oopHandle area | 2031 // |---------------------| <- oop_handle_offset 2032 // | outbound memory | 2033 // | based arguments | 2034 // | | 2035 // |---------------------| 2036 // | vararg area | 2037 // |---------------------| 2038 // | | 2039 // SP-> | out_preserved_slots | 2040 // 2041 // 2042 2043 2044 // Now compute actual number of stack words we need rounding to make 2045 // stack properly aligned. 2046 stack_slots = align_up(stack_slots, 2 * VMRegImpl::slots_per_word); 2047 2048 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 2049 2050 // Generate stack overflow check before creating frame 2051 __ generate_stack_overflow_check(stack_size); 2052 2053 // Generate a new frame for the wrapper. 2054 __ save(SP, -stack_size, SP); 2055 2056 int frame_complete = ((intptr_t)__ pc()) - start; 2057 2058 __ verify_thread(); 2059 2060 if (is_critical_native) { 2061 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, 2062 oop_handle_offset, oop_maps, in_regs, in_sig_bt); 2063 } 2064 2065 // 2066 // We immediately shuffle the arguments so that any vm call we have to 2067 // make from here on out (sync slow path, jvmti, etc.) we will have 2068 // captured the oops from our caller and have a valid oopMap for 2069 // them. 2070 2071 // ----------------- 2072 // The Grand Shuffle 2073 // 2074 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2075 // (derived from JavaThread* which is in L7_thread_cache) and, if static, 2076 // the class mirror instead of a receiver. This pretty much guarantees that 2077 // register layout will not match. We ignore these extra arguments during 2078 // the shuffle. The shuffle is described by the two calling convention 2079 // vectors we have in our possession. We simply walk the java vector to 2080 // get the source locations and the c vector to get the destinations. 2081 // Because we have a new window and the argument registers are completely 2082 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about 2083 // here. 2084 2085 // This is a trick. We double the stack slots so we can claim 2086 // the oops in the caller's frame. 
Since we are sure to have 2087 // more args than the caller doubling is enough to make 2088 // sure we can capture all the incoming oop args from the 2089 // caller. 2090 // 2091 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2092 // Record sp-based slot for receiver on stack for non-static methods 2093 int receiver_offset = -1; 2094 2095 // We move the arguments backward because the floating point registers 2096 // destination will always be to a register with a greater or equal register 2097 // number or the stack. 2098 2099 #ifdef ASSERT 2100 bool reg_destroyed[RegisterImpl::number_of_registers]; 2101 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2102 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2103 reg_destroyed[r] = false; 2104 } 2105 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2106 freg_destroyed[f] = false; 2107 } 2108 2109 #endif /* ASSERT */ 2110 2111 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) { 2112 2113 #ifdef ASSERT 2114 if (in_regs[i].first()->is_Register()) { 2115 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!"); 2116 } else if (in_regs[i].first()->is_FloatRegister()) { 2117 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!"); 2118 } 2119 if (out_regs[c_arg].first()->is_Register()) { 2120 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2121 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 2122 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true; 2123 } 2124 #endif /* ASSERT */ 2125 2126 switch (in_sig_bt[i]) { 2127 case T_ARRAY: 2128 if (is_critical_native) { 2129 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]); 2130 c_arg--; 2131 break; 2132 } 2133 case T_OBJECT: 2134 assert(!is_critical_native, "no oop arguments"); 2135 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2136 ((i == 0) && (!is_static)), 2137 &receiver_offset); 2138 break; 2139 case T_VOID: 2140 break; 2141 2142 case T_FLOAT: 2143 float_move(masm, in_regs[i], out_regs[c_arg]); 2144 break; 2145 2146 case T_DOUBLE: 2147 assert( i + 1 < total_in_args && 2148 in_sig_bt[i + 1] == T_VOID && 2149 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2150 double_move(masm, in_regs[i], out_regs[c_arg]); 2151 break; 2152 2153 case T_LONG : 2154 long_move(masm, in_regs[i], out_regs[c_arg]); 2155 break; 2156 2157 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2158 2159 default: 2160 move32_64(masm, in_regs[i], out_regs[c_arg]); 2161 } 2162 } 2163 2164 // Pre-load a static method's oop into O1. Used both by locking code and 2165 // the normal JNI call code. 2166 if (method->is_static() && !is_critical_native) { 2167 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1); 2168 2169 // Now handlize the static class mirror in O1. It's known not-null. 2170 __ st_ptr(O1, SP, klass_offset + STACK_BIAS); 2171 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2172 __ add(SP, klass_offset + STACK_BIAS, O1); 2173 } 2174 2175 2176 const Register L6_handle = L6; 2177 2178 if (method->is_synchronized()) { 2179 assert(!is_critical_native, "unhandled"); 2180 __ mov(O1, L6_handle); 2181 } 2182 2183 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs 2184 // except O6/O7. 
So if we must call out we must push a new frame. We immediately 2185 // push a new frame and flush the windows. 2186 intptr_t thepc = (intptr_t) __ pc(); 2187 { 2188 address here = __ pc(); 2189 // Call the next instruction 2190 __ call(here + 8, relocInfo::none); 2191 __ delayed()->nop(); 2192 } 2193 2194 // We use the same pc/oopMap repeatedly when we call out 2195 oop_maps->add_gc_map(thepc - start, map); 2196 2197 // O7 now has the pc loaded that we will use when we finally call to native. 2198 2199 // Save thread in L7; it crosses a bunch of VM calls below 2200 // Don't use save_thread because it smashes G2 and we merely 2201 // want to save a copy 2202 __ mov(G2_thread, L7_thread_cache); 2203 2204 2205 // If we create an inner frame once is plenty 2206 // when we create it we must also save G2_thread 2207 bool inner_frame_created = false; 2208 2209 // dtrace method entry support 2210 { 2211 SkipIfEqual skip_if( 2212 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2213 // create inner frame 2214 __ save_frame(0); 2215 __ mov(G2_thread, L7_thread_cache); 2216 __ set_metadata_constant(method(), O1); 2217 __ call_VM_leaf(L7_thread_cache, 2218 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2219 G2_thread, O1); 2220 __ restore(); 2221 } 2222 2223 // RedefineClasses() tracing support for obsolete method entry 2224 if (log_is_enabled(Trace, redefine, class, obsolete)) { 2225 // create inner frame 2226 __ save_frame(0); 2227 __ mov(G2_thread, L7_thread_cache); 2228 __ set_metadata_constant(method(), O1); 2229 __ call_VM_leaf(L7_thread_cache, 2230 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 2231 G2_thread, O1); 2232 __ restore(); 2233 } 2234 2235 // We are in the jni frame unless saved_frame is true in which case 2236 // we are in one frame deeper (the "inner" frame). If we are in the 2237 // "inner" frames the args are in the Iregs and if the jni frame then 2238 // they are in the Oregs. 2239 // If we ever need to go to the VM (for locking, jvmti) then 2240 // we will always be in the "inner" frame. 2241 2242 // Lock a synchronized method 2243 int lock_offset = -1; // Set if locked 2244 if (method->is_synchronized()) { 2245 Register Roop = O1; 2246 const Register L3_box = L3; 2247 2248 create_inner_frame(masm, &inner_frame_created); 2249 2250 __ ld_ptr(I1, 0, O1); 2251 Label done; 2252 2253 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size); 2254 __ add(FP, lock_offset+STACK_BIAS, L3_box); 2255 #ifdef ASSERT 2256 if (UseBiasedLocking) { 2257 // making the box point to itself will make it clear it went unused 2258 // but also be obviously invalid 2259 __ st_ptr(L3_box, L3_box, 0); 2260 } 2261 #endif // ASSERT 2262 // 2263 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch 2264 // 2265 __ compiler_lock_object(Roop, L1, L3_box, L2); 2266 __ br(Assembler::equal, false, Assembler::pt, done); 2267 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box); 2268 2269 2270 // None of the above fast optimizations worked so we have to get into the 2271 // slow case of monitor enter. Inline a special case of call_VM that 2272 // disallows any pending_exception. 2273 __ mov(Roop, O0); // Need oop in O0 2274 __ mov(L3_box, O1); 2275 2276 // Record last_Java_sp, in case the VM code releases the JVM lock. 
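    // (The anchor gives the stack walker a fixed starting point, so the VM
    // can traverse this thread's stack if we block inside the call below.)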

    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flushw();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago; now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results. For int-types, we do any needed sign-extension
  // and move things into I0. The return value there will survive any VM
  // calls for blocking or unlocking. An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:   break;       // Nothing to do!
  case T_FLOAT:  break;       // Got it where we want it (unless slow-path)
  case T_DOUBLE: break;       // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0 (in the 32-bit build it would be in O0,O1)
  case T_LONG:
    // Fall thru
  case T_OBJECT:              // Really a handle
                              // Cannot de-handlize until after reclaiming jvm_lock
  case T_ARRAY:
  case T_INT:
    __ mov(O0, I0);
    break;
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE  : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
  case T_CHAR  : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
  default:
    ShouldNotReachHere();
  }

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
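  // The state sequence from here is _thread_in_native -> _thread_in_native_trans
  // -> (block if a safepoint or suspend request is pending) -> _thread_in_Java.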
  { Label no_block;

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

    // Force this write out before the read below
    __ membar(Assembler::StoreLoad);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ safepoint_poll(L, false, G2_thread, G3_scratch);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }

  // Thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  __ bind(after_transition);

  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // Native result, if any, is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.
    // Since it's a leaf call, the pending exception (if any) can be kept in
    // a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    // (Roop, Rmark, Rbox, Rscratch)
    __ compiler_unlock_object(L4, L1, L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation. Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM. Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit. Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    // Pass in current thread pointer
    __ mov(G2_thread, O2);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0); // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set. The forward_exception routine expects to see the
    // exception in pending_exception and not in a register. Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                    G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unbox oop result, e.g. JNIHandles::resolve value in I0.
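  // (The native call returned a jobject, i.e. an indirection; resolve_jobject
  // presumably yields the raw oop, coping with NULL and weak handles.)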
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    __ resolve_jobject(I0, G3_scratch);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
  }

  if (!is_critical_native) {
    // reset handle block
    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());

    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
    check_forward_pending_exception(masm, G3_scratch);
  }


  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }
  return nm;

}

// This function returns the adjustment size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0; // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return align_up(diff, WordsPerLong);
}

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On SPARC this describes the words reserved for storing a register window
// when an interrupt occurs.
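// (For example, with frame::register_save_words == 16, the 8 %i plus 8 %l
// registers of a window, and two 32-bit VMReg slots per 64-bit word, this
// would be 32 slots, i.e. 128 bytes.)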
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register G3pcs         = G3_scratch; // Array of new pcs (input)
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;         // Array of frame sizes (input)
  Register O4array_size  = O4;         // number of frames (input)
  Register O7frame_size  = O7;         // size of the new frame, loaded from O3array (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7); // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs); // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5 as it could hold the valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register G3pcs         = G3_scratch;
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;
  Register O4array_size  = O4;
  Label loop;

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }
#endif

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP it is only needed at the transition
  // (fortunately).
If we had to have it correct everywhere then we would need to 2663 // be told the sp_adjustment for each frame we create. If the frame size array 2664 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size] 2665 // for each frame we create and keep up the illusion every where. 2666 // 2667 2668 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7); 2669 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment 2670 __ sub(SP, O7, SP); 2671 2672 #ifdef ASSERT 2673 // make sure that there is at least one entry in the array 2674 __ tst(O4array_size); 2675 __ breakpoint_trap(Assembler::zero, Assembler::icc); 2676 #endif 2677 2678 // Now push the new interpreter frames 2679 __ bind(loop); 2680 2681 // allocate a new frame, filling the registers 2682 2683 gen_new_frame(masm, deopt); // allocate an interpreter frame 2684 2685 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop); 2686 __ delayed()->add(O3array, wordSize, O3array); 2687 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc 2688 2689 } 2690 2691 //------------------------------generate_deopt_blob---------------------------- 2692 // Ought to generate an ideal graph & compile, but here's some SPARC ASM 2693 // instead. 2694 void SharedRuntime::generate_deopt_blob() { 2695 // allocate space for the code 2696 ResourceMark rm; 2697 // setup code generation tools 2698 int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code 2699 #ifdef ASSERT 2700 if (UseStackBanging) { 2701 pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32; 2702 } 2703 #endif 2704 #if INCLUDE_JVMCI 2705 if (EnableJVMCI) { 2706 pad += 1000; // Increase the buffer size when compiling for JVMCI 2707 } 2708 #endif 2709 CodeBuffer buffer("deopt_blob", 2100+pad, 512); 2710 MacroAssembler* masm = new MacroAssembler(&buffer); 2711 FloatRegister Freturn0 = F0; 2712 Register Greturn1 = G1; 2713 Register Oreturn0 = O0; 2714 Register Oreturn1 = O1; 2715 Register O2UnrollBlock = O2; 2716 Register L0deopt_mode = L0; 2717 Register G4deopt_mode = G4_scratch; 2718 int frame_size_words; 2719 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS); 2720 Label cont; 2721 2722 OopMapSet *oop_maps = new OopMapSet(); 2723 2724 // 2725 // This is the entry point for code which is returning to a de-optimized 2726 // frame. 2727 // The steps taken by this frame are as follows: 2728 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1) 2729 // and all potentially live registers (at a pollpoint many registers can be live). 2730 // 2731 // - call the C routine: Deoptimization::fetch_unroll_info (this function 2732 // returns information about the number and size of interpreter frames 2733 // which are equivalent to the frame which is being deoptimized) 2734 // - deallocate the unpack frame, restoring only results values. Other 2735 // volatile registers will now be captured in the vframeArray as needed. 
2736 // - deallocate the deoptimization frame 2737 // - in a loop using the information returned in the previous step 2738 // push new interpreter frames (take care to propagate the return 2739 // values through each new frame pushed) 2740 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0) 2741 // - call the C routine: Deoptimization::unpack_frames (this function 2742 // lays out values on the interpreter frame which was just created) 2743 // - deallocate the dummy unpack_frame 2744 // - ensure that all the return values are correctly set and then do 2745 // a return to the interpreter entry point 2746 // 2747 // Refer to the following methods for more information: 2748 // - Deoptimization::fetch_unroll_info 2749 // - Deoptimization::unpack_frames 2750 2751 OopMap* map = NULL; 2752 2753 int start = __ offset(); 2754 2755 // restore G2, the trampoline destroyed it 2756 __ get_thread(); 2757 2758 // On entry we have been called by the deoptimized nmethod with a call that 2759 // replaced the original call (or safepoint polling location) so the deoptimizing 2760 // pc is now in O7. Return values are still in the expected places 2761 2762 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words); 2763 __ ba(cont); 2764 __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode); 2765 2766 2767 #if INCLUDE_JVMCI 2768 Label after_fetch_unroll_info_call; 2769 int implicit_exception_uncommon_trap_offset = 0; 2770 int uncommon_trap_offset = 0; 2771 2772 if (EnableJVMCI) { 2773 masm->block_comment("BEGIN implicit_exception_uncommon_trap"); 2774 implicit_exception_uncommon_trap_offset = __ offset() - start; 2775 2776 __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7); 2777 __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()))); 2778 __ add(O7, -8, O7); 2779 2780 uncommon_trap_offset = __ offset() - start; 2781 2782 // Save everything in sight. 2783 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words); 2784 __ set_last_Java_frame(SP, NULL); 2785 2786 __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1); 2787 __ sub(G0, 1, L1); 2788 __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset())); 2789 2790 __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode); 2791 __ mov(G2_thread, O0); 2792 __ mov(L0deopt_mode, O2); 2793 __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)); 2794 __ delayed()->nop(); 2795 oop_maps->add_gc_map( __ offset()-start, map->deep_copy()); 2796 __ get_thread(); 2797 __ add(O7, 8, O7); 2798 __ reset_last_Java_frame(); 2799 2800 __ ba(after_fetch_unroll_info_call); 2801 __ delayed()->nop(); // Delay slot 2802 masm->block_comment("END implicit_exception_uncommon_trap"); 2803 } // EnableJVMCI 2804 #endif // INCLUDE_JVMCI 2805 2806 int exception_offset = __ offset() - start; 2807 2808 // restore G2, the trampoline destroyed it 2809 __ get_thread(); 2810 2811 // On entry we have been jumped to by the exception handler (or exception_blob 2812 // for server). O0 contains the exception oop and O7 contains the original 2813 // exception pc. So if we push a frame here it will look to the 2814 // stack walking code (fetch_unroll_info) just like a normal call so 2815 // state will be extracted normally. 2816 2817 // save exception oop in JavaThread and fall through into the 2818 // exception_in_tls case since they are handled in same way except 2819 // for where the pending exception is kept. 
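  // (Oexception is the SPARC alias for O0, where the handler left the
  // exception oop; parking it in the thread keeps it alive across the
  // register save that follows.)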
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  // Opens a new stack frame
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif
  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ mov(L0deopt_mode, O1);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site. This describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif
  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
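  // (exception_oop and exception_pc in the thread are cleared right after the
  // load so a stale exception cannot be re-delivered later.)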
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame, taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and a possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values, and
  // call Deoptimization::unpack_frames to have the unpacker lay out
  // values in the interpreter frames just created; then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
  // LP64 uses g4 in set_last_Java_frame, so move the mode out of the way first
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  Register O2UnrollBlock = O2;
  Register O2klass_index = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //  - push a fake "unpack_frame"
  //  - call the C routine Deoptimization::uncommon_trap (this function
  //    packs the current compiled frame into vframe arrays and returns
  //    information about the number and size of interpreter frames which
  //    are equivalent to the frame which is being deoptimized)
  //  - deallocate the "unpack_frame"
  //  - deallocate the deoptimization frame
  //  - in a loop using the information returned in the previous step
  //    push interpreter frames
  //  - create a dummy "unpack_frame"
  //  - call the C routine: Deoptimization::unpack_frames (this function
  //    lays out values on the interpreter frame which was just created)
  //  - deallocate the dummy unpack_frame
  //  - return to the interpreter entry point
  //    (a conceptual sketch of this sequence follows this comment block)
  //
  // Refer to the following methods for more information:
  //  - Deoptimization::uncommon_trap
  //  - Deoptimization::unpack_frames
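  // Conceptual sketch of the unroll sequence this blob drives (pseudo-C with
  // hypothetical helper names; the real work is done by the two runtime calls
  // emitted below):
  //
  //   UnrollBlock* info = Deoptimization::uncommon_trap(thread, klass_index, exec_mode);
  //   pop_compiled_frame();                  // the frame being deoptimized
  //   for (each frame described by info)
  //     push_skeletal_interpreter_frame();   // done by make_new_frames
  //   Deoptimization::unpack_frames(thread, Unpack_uncommon_trap);  // fill them in
  //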
  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame, taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

#ifdef ASSERT
  { Label L;
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Allocate new interpreter frame(s) and a possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values, and
  // call Deoptimization::unpack_frames to have the unpacker lay out
  // values in the interpreter frames just created; then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2
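// For reference, the shared runtime typically instantiates the handler blob
// below along these lines (a sketch from memory of the shared
// sharedRuntime.cpp, not verbatim):
//
//   _polling_page_safepoint_handler_blob =
//       generate_handler_blob(CAST_FROM_FN_PTR(address,
//           SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
//   _polling_page_return_handler_blob =
//       generate_handler_blob(CAST_FROM_FN_PTR(address,
//           SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);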
//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at  896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("handler_blob", 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll,
    // so that the frame constructor always sees a valid return address
    __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  Register saved_O7 = O7->after_save();
  if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
    // Keep a copy of the return pc in L0 to detect if it gets modified
    __ mov(saved_O7, L0);
    // Adjust and keep a copy of our npc saved by the signal handler
    __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_npc_offset()), L1);
    __ sub(L1, frame::pc_return_offset, L1);
  }

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
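  // What add_gc_map records below, roughly (illustration only, not VM code):
  // the OopMapSet maps the pc offset of the return address of the call just
  // emitted to the register-save layout, i.e.
  //
  //   oop_maps[__ offset() - start] = map;
  //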

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
    // If nobody modified our return pc then we must return to the npc which we saved in L1
    __ cmp(saved_O7, L0);
    __ movcc(Assembler::equal, false, Assembler::ptr_cc, L1, saved_O7);
  }

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the safepoint blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are and the caller
// must do any GC of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at  896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer(name, 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call destination.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to, assuming no exception was installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
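  // Note on the code below: restore_live_registers reloads every register
  // from the save area, so the call results must be smuggled through the
  // saved stack slots rather than kept in registers. Roughly (illustration
  // only, not VM code):
  //
  //   saved_slot(G5) = vm_result_2;  // the returned Method*
  //   saved_slot(G3) = O0;           // the code entry point we will jump to
  //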

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // (CodeBlob frame size is in words, so frame_size_words is the right unit here)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
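
// For reference, the shared runtime typically instantiates resolve blobs
// along these lines (a sketch from memory of SharedRuntime::generate_stubs(),
// not verbatim):
//
//   _resolve_static_call_blob =
//       generate_resolve_blob(CAST_FROM_FN_PTR(address,
//           SharedRuntime::resolve_static_call_C), "resolve_static_call");
//   _resolve_virtual_call_blob =
//       generate_resolve_blob(CAST_FROM_FN_PTR(address,
//           SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");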