/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->
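
// HotSpot idiom: "__" expands to "masm->", so the instruction-emitting
// code below reads almost like an assembly listing.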

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use align_up because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };

 public:

  static int Oexception_offset() { return o0_offset; }
  static int G3_offset() { return g3_offset; }
  static int G5_offset() { return g5_offset; }
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = align_up(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  __ save(SP, -frame_size, SP);

  int debug_offset = 0;
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
  }

  // Save the flags
  __ rdccr(G5);
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

  // Restore flags
  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();
}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 8 bytes FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 40;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ set((intptr_t)destination, G3_scratch);
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot
// past the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to
// RegisterImpl::number_of_registers) are the 64-bit integer registers.
// Values 64-95 are the (32-bit only) float registers. Each 32-bit quantity
// is given its own number, so the integer registers (in either 32- or 64-bit
// builds) use 2 numbers. For example, there is an O0-low and an O0-high.
// Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's
// unused (a 32-bit value was passed). If both are VMRegImpl::Bad(), it means
// no value was passed (used as a placeholder for the other half of longs and
// doubles in the 64-bit build). regs[].second() is either VMRegImpl::Bad() or
// regs[].first()+1 (regs[].first() may be misaligned in the C calling
// convention). Sparc never passes a value in regs[].second() but not
// regs[].first() (i.e. regs[].first() == VMRegImpl::Bad() with a valid
// regs[].second() never happens), nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
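
// Illustrative example (not generated code): under this convention a
// signature (int, long, double) would typically be assigned
//   int    -> the first int register (I0 incoming / O0 outgoing),
//   long   -> the next int register, with a trailing T_VOID placeholder
//             in sig_bt for its other half,
//   double -> an aligned float pair such as F0/F1 (i.e. D0),
// and anything beyond the register quotas spills to aligned stack slots.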
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (align_up(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = align_up(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack,  // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack,  // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);  // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  //   G1: 1st Long arg (32bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32bit build);
  //   G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);       // VM needs target method
  __ mov(I7, O1);              // VM needs caller's callsite
  // Must be a leaf call...
  // Can be very far once the blob has been relocated.
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
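
// Note: SPARC load/store displacements are 13-bit signed immediates, so
// ensure_simm13_or_reg() keeps the offset as a constant when it fits and
// otherwise materializes it into Rdisp first.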

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack,  // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = align_up(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                                (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
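  // Each argument occupies one Interpreter::stackElementSize slot; st_off
  // starts at the interpreter's first (deepest) argument and decreases, so
  // argument i lands in the slot the interpreter expects.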
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();  // as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack,  // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  //   G2_thread    - TLS
  //   G5_method    - Method oop
  //   G4 (Gargs)   - Pointer to interpreter's args
  //   O0..O4       - free for scratch
  //   O5_savedSP   - Caller's saved SP, to be restored if needed
  //   O6           - Current SP!
  //   O7           - Valid return address
  //   L0-L7, I0-I7 - Caller's temps (no frame pushed yet)

  // Outputs:
  //   G2_thread    - TLS
  //   O0-O5        - Outgoing args in compiled layout
  //   O6           - Adjusted or restored SP
  //   O7           - Valid return address
  //   L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
  //   F0-F7        - more outgoing args

  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, longs, |   |
  // | floats,      |   |---Outgoing stack args.
  // :  doubles     :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET
  // UP FOR COMPILED CODE AND THE FRAME IS SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
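    // ld_off is the offset from Gargs of the top of argument i; the first
    // argument sits deepest, so the offset shrinks as i grows.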
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {     // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();    // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset.
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // Check if this call should be routed towards a specific entry point.
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack,  // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();
  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

// Helper function for native calling conventions
static VMReg int_stk_helper(int i) {
  // Bias any stack-based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), the c calling
  // convention must add in this bias amount when actually referencing
  // any stack location, to make up for the fact that
  // out_preserve_stack_slots is insufficient for C calls. What a mess.
  // I sure hope those 6 stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if (mem_parm_offset < 0) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in.
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
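
// Rough sketch of the resulting mapping (assuming SPARC_ARGS_IN_REGS_NUM
// is 6): i = 0..5 map to %o0..%o5; i >= 6 maps to a slot in the memory
// parameter area, reported with the -out_preserve_stack_slots() bias
// described above.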

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on sparc");

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  VMRegPair param_array_reg;  // location of the argument in the parameter array
  for (int i = 0; i < total_args_passed; i++, j++) {
    param_array_reg.set_bad();
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      // fall-through
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
      // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
      //
      // "When a callee prototype exists, and does not indicate variable arguments,
      // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
      // will be promoted to floating-point registers"
      //
      // By "promoted" it means that the argument is located in two places, an
      // unused spill slot in the "parameter array" (starts at %sp+BIAS+128),
      // and a live float register.
      // In most cases, there are 6 or fewer arguments of any type, and the
      // standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176
      // exclusive) serve as shadow slots. Per the spec, floating-point
      // registers %d6 to %d16 require slots beyond that (up to %sp+BIAS+248).
      //
      {
        // V9ism: floats go in ODD registers and stack slots
        int float_index = 1 + (j << 1);
        param_array_reg.set1(VMRegImpl::stack2reg(float_index));
        if (j < 16) {
          regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
        } else {
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_DOUBLE:
      {
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // V9ism: doubles go in EVEN/ODD regs and stack slots
        int double_index = (j << 1);
        param_array_reg.set2(VMRegImpl::stack2reg(double_index));
        if (j < 16) {
          regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      j--;
      break;  // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    // Keep track of the deepest parameter array slot.
    if (!param_array_reg.first()->is_valid()) {
      param_array_reg = regs[i];
    }
    if (param_array_reg.first()->is_stack()) {
      int off = param_array_reg.first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (param_array_reg.second()->is_stack()) {
      int off = param_array_reg.second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
  return align_up(max_stack_slots + 1, 2);
}


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}
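
// Note: only T_FLOAT and T_DOUBLE are handled above; integer and pointer
// results are presumably kept live in registers that survive the runtime
// call, so they need no stack temporary here.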

// Check and forward any pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call,
// there is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread);  // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();  // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64 bit we will store integer-like items to the stack as 64-bit items
// (sparc abi) even though java would only store 32 bits for a parameter.
// On 32 bit it will simply be 32 bits. So this routine will do 32->32 on
// 32 bit and 32->64 on 64 bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), L5);
    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
    __ movr(Assembler::rc_z, L4, G0, rHandle);
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack.
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
    __ movr(Assembler::rc_z, rOop, G0, rHandle);

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do a float-reg to int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack - the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here, else do two int moves.
  if (src.is_single_phys_reg()) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // Split src into two separate registers.
      // Remember "hi" means hi address or lsw on sparc.
      // Move msw to lsw.
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // This will only move lo -> lo and ignore hi.
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo, i.e. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address, not msb, for stack slots,
      // and lo is the "real" register for registers.

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is a stack move to L5;
        // lsw is a stack move to dst.lo (the real reg);
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // dst now has the correct low-order word; shift the msw half into
      // position and OR it in.
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that, like long_move, a VMRegPair might be:
  //  1: a single physical register
  //  2: two physical registers (v8)
  //  3: a physical reg [lo] and a stack slot [hi] (v8)
  //  4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?).

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack - the easiest of the bunch
      // ought to be a way to do this where if alignment is ok we use ldd/std when possible
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
        if (reg2offset(dst.first()) & 0x7) {
          // Not aligned: store as a pair of singles
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
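  // Driving both directions through one routine keeps the save and the
  // restore in lock step: handle_index advances identically on both
  // passes, so the offsets used by the loads always match the stores.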
  if (map != NULL) {
    // Fill in the map
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        if (in_regs[i].first()->is_stack()) {
          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
        } else if (in_regs[i].first()->is_Register()) {
          map->set_oop(in_regs[i].first());
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  // Save or restore double word values
  int handle_index = 0;
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
      const Register reg = in_regs[i].first()->as_Register();
      if (reg->is_global()) {
        handle_index += 2;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ stx(reg, SP, offset + STACK_BIAS);
        } else {
          __ ldx(SP, offset + STACK_BIAS, reg);
        }
      }
    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
  // Save or restore single-word floats
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
      handle_index++;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }

}


// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
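// In outline the emitted code behaves like this (a sketch, not literal
// C++ executed at runtime):
//   if (GCLocker::needs_gc()) {
//     <save argument registers, recording oops in an OopMap>
//     SharedRuntime::block_for_jni_critical(thread);  // may block for GC
//     <reload argument registers>
//   }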
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  AddressLiteral sync_state(GCLocker::needs_gc_address());
  __ load_bool_contents(sync_state, G3_scratch);
  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
  __ delayed()->nop();

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mov(G2_thread, L7_thread_cache);

  __ set_last_Java_frame(SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);
  oop_maps->add_gc_map( __ offset(), map);

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reset_last_Java_frame();

  // Reload all the register arguments
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        if (reg->is_global()) {
          __ mov(G0, reg);
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
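// For example (illustrative C prototype only): a critical native taking a
// Java byte[] sees it as the pair
//   void foo(jint length, jbyte* body);
// where body points at the first array element; for a null array both the
// length and the body pointer are passed as zero.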
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  // Pass the length, ptr pair
  Label is_null, done;
  if (reg.first()->is_stack()) {
    VMRegPair tmp = reg64_to_VMRegPair(L2);
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ cmp(reg.first()->as_Register(), G0);
  __ brx(Assembler::equal, false, Assembler::pt, is_null);
  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
  __ ba_short(done);
  __ bind(is_null);
  // Pass zeros
  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
  __ bind(done);
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = G5_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
          __ ld_ptr(SP, ld_off, temp_reg);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = G5_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, member_reg);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
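    // (For _invokeBasic this is the MethodHandle itself; for the linkTo*
    // intrinsics it is the ordinary leading receiver argument, present
    // only when ref_kind_has_receiver(ref_kind) says so.)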
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = G3_scratch;  // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing, make an IC check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
    // Object.hashCode, System.identityHashCode can pull the hashCode from the
    // header word instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do this
    // optimization at the call site without a lot of work.
    Label slowCase;
    Label done;
    Register obj_reg = O0;
    Register result  = O0;
    Register header  = G3_scratch;
    Register hash    = G3_scratch;  // overwrite header value with hash value
    Register mask    = G1;          // to get hash field from header

    // Unlike Object.hashCode, System.identityHashCode is a static method and
    // gets the object as an argument instead of as the receiver.
    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
      assert(method->is_static(), "method should be static");
      // return 0 for null reference input
      __ br_null(obj_reg, false, Assembler::pn, done);
      __ delayed()->mov(obj_reg, hash);
    }

    // Read the header and build a mask to get its hash field.
    // Give up if the object is not unlocked.
    // We depend on hash_mask being at most 32 bits and avoid the use of
    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
    // vm: see markOop.hpp.
    __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
    __ sethi(markOopDesc::hash_mask, mask);
    __ btst(markOopDesc::unlocked_value, header);
    __ br(Assembler::zero, false, Assembler::pn, slowCase);
    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ delayed()->nop();
      __ btst(markOopDesc::biased_lock_bit_in_place, header);
      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
    }
    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);

    // Check for a valid (non-zero) hash code and get its value.
    __ srlx(header, markOopDesc::hash_shift, hash);
    __ andcc(hash, mask, hash);
    __ br(Assembler::equal, false, Assembler::pn, slowCase);
    __ delayed()->nop();

    // leaf return.
    __ bind(done);
    __ retl();
    __ delayed()->mov(hash, result);
    __ bind(slowCase);
  }
#endif // COMPILER1


  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  int total_save_slots = 6 * VMRegImpl::slots_per_word;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // These have to be saved and restored across the safepoint
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE; break;
            case 'C': in_elem_bt[i] = T_CHAR; break;
            case 'D': in_elem_bt[i] = T_DOUBLE; break;
            case 'F': in_elem_bt[i] = T_FLOAT; break;
            case 'I': in_elem_bt[i] = T_INT; break;
            case 'J': in_elem_bt[i] = T_LONG; break;
            case 'S': in_elem_bt[i] = T_SHORT; break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the 1st six register arguments).  It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number, not
  // the outbound number).
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
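  // (Roughly: ABI preserve area + outgoing args + oop handle area
  // [+ klass slot if static] [+ lock slot if synchronized] + 2 temp slots,
  // rounded up to a 16-byte boundary; the steps and the layout picture
  // below make this precise.)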
  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = align_up(stack_slots, 2);
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place to save return value or as a temporary for any gpr -> fpr moves
  stack_slots += 2;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      | vararg area         |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
  stack_slots = align_up(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  int frame_complete = ((intptr_t)__ pc()) - start;

  __ verify_thread();

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  // the class mirror instead of a receiver.  This pretty much guarantees that
  // register layout will not match.  We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
  // Because we have a new window and the argument registers are completely
  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  // here.

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame.
  // Since we are sure to have more args than the caller, doubling is
  // enough to make sure we can capture all the incoming oop args from
  // the caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  // Record sp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // We move the arguments backward because the floating point destination
  // will always be a register with a greater or equal register number, or
  // the stack.

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
          c_arg--;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into O1.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);

    // Now handlize the static class mirror in O1.  It's known not-null.
    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ add(SP, klass_offset + STACK_BIAS, O1);
  }


  const Register L6_handle = L6;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    __ mov(O1, L6_handle);
  }

  // We have all of the arguments setup at this point. We MUST NOT touch
  // any Oregs except O6/O7.
  // So if we must call out we must push a new frame.  We immediately
  // push a new frame and flush the windows.
  intptr_t thepc = (intptr_t) __ pc();
  {
    address here = __ pc();
    // Call the next instruction
    __ call(here + 8, relocInfo::none);
    __ delayed()->nop();
  }

  // We use the same pc/oopMap repeatedly when we call out
  oop_maps->add_gc_map(thepc - start, map);

  // O7 now has the pc loaded that we will use when we finally call to native.

  // Save thread in L7; it crosses a bunch of VM calls below
  // Don't use save_thread because it smashes G2 and we merely
  // want to save a copy
  __ mov(G2_thread, L7_thread_cache);


  // If we create an inner frame, once is plenty.
  // When we create it we must also save G2_thread.
  bool inner_frame_created = false;

  // dtrace method entry support
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // We are in the jni frame unless saved_frame is true, in which case
  // we are one frame deeper (in the "inner" frame). If we are in the
  // "inner" frame the args are in the Iregs; in the jni frame they are
  // in the Oregs.
  // If we ever need to go to the VM (for locking, jvmti) then
  // we will always be in the "inner" frame.

  // Lock a synchronized method
  int lock_offset = -1;         // Set if locked
  if (method->is_synchronized()) {
    Register Roop = O1;
    const Register L3_box = L3;

    create_inner_frame(masm, &inner_frame_created);

    __ ld_ptr(I1, 0, O1); // get the oop out of the handle we passed in
    Label done;

    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
    __ add(FP, lock_offset+STACK_BIAS, L3_box);
#ifdef ASSERT
    if (UseBiasedLocking) {
      // making the box point to itself will make it clear it went unused
      // but also be obviously invalid
      __ st_ptr(L3_box, L3_box, 0);
    }
#endif // ASSERT
    //
    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
    //
    __ compiler_lock_object(Roop, L1, L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()->add(FP, lock_offset+STACK_BIAS, L3_box);


    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(Roop, O0);            // Need oop in O0
    __ mov(L3_box, O1);

    // Record last_Java_sp, in case the VM code releases the JVM lock.
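    // From inside the inner frame the wrapper's SP is visible as FP, and
    // I7 still holds the pc captured by the call trick above, so (FP, I7)
    // lets the stack walker find the wrapper frame and reuse the oopMap
    // recorded at that pc.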
    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flushw();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
    case T_VOID:    break;      // Nothing to do!
    case T_FLOAT:   break;      // Got it where we want it (unless slow-path)
    case T_DOUBLE:  break;      // Got it where we want it (unless slow-path)
    // In the 64-bit build the result is in O0 (in a 32-bit build it would
    // be in O0, O1).
    case T_LONG:
      // Fall thru
    case T_OBJECT:              // Really a handle
    case T_ARRAY:
    case T_INT:
      __ mov(O0, I0);
      break;
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
      // (An oop result cannot be de-handlized until after reclaiming jvm_lock.)
    default:
      ShouldNotReachHere();
  }

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
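  // The transition dance below, in outline (a sketch, not literal code):
  //   state = _thread_in_native_trans;
  //   StoreLoad barrier (or serialization page write);
  //   if (safepoint in progress || suspend flags set)
  //     check_special_condition_for_native_trans(thread);  // may block
  //   state = _thread_in_Java;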
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }

  // thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  __ bind(after_transition);

  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.  Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation.  Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM.  Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    // Pass in current thread pointer
    __ mov(G2_thread, O2);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);              // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set.  The forward_exception routine expects to see the
    // exception in pending_exception and not in a register.  Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unbox oop result, e.g. JNIHandles::resolve value in I0.
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label done, not_weak;
    __ br_null(I0, false, Assembler::pn, done); // Use NULL as-is.
    __ delayed()->andcc(I0, JNIHandles::weak_tag_mask, G0); // Test for jweak
    __ brx(Assembler::zero, true, Assembler::pt, not_weak);
    __ delayed()->ld_ptr(I0, 0, I0); // Maybe resolve (untagged) jobject.
    // Resolve jweak.
    __ ld_ptr(I0, -JNIHandles::weak_tag_value, I0);
#if INCLUDE_ALL_GCS
    if (UseG1GC) {
      // Copy to O0 because macro doesn't allow pre_val in input reg.
      __ mov(I0, O0);
      __ g1_write_barrier_pre(noreg /* obj */,
                              noreg /* index */,
                              0 /* offset */,
                              O0 /* pre_val */,
                              G3_scratch /* tmp */,
                              true /* preserve_o_regs */);
    }
#endif // INCLUDE_ALL_GCS
    __ bind(not_weak);
    __ verify_oop(I0);
    __ bind(done);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
  }

  if (!is_critical_native) {
    // reset handle block
    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());

    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
    check_forward_pending_exception(masm, G3_scratch);
  }


  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }
  return nm;

}

// this function returns the adjustment (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return align_up(diff, WordsPerLong);
}

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
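// For example, with frame::register_save_words == 16 (one register
// window's 8 %i and 8 %l registers) and two 32-bit slots per 64-bit word,
// this returns 32 slots, i.e. 128 bytes. (The numbers are illustrative of
// the 64-bit port, not a normative part of the interface.)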
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register        G3pcs         = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0      = O0;
  Register        Oreturn1      = O1;
  Register        O2UnrollBlock = O2;
  Register        O3array       = O3;         // Array of frame sizes (input)
  Register        O4array_size  = O4;         // number of frames (input)
  Register        O7frame_size  = O7;         // size of the frame to allocate (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);               // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5 could have valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs         = G3_scratch;
  Register        Oreturn0      = O0;
  Register        Oreturn1      = O1;
  Register        O2UnrollBlock = O2;
  Register        O3array       = O3;
  Register        O4array_size  = O4;
  Label           loop;

#ifdef ASSERT
  // Compilers generate code that bang the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }
#endif

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP, it is only needed at the transition
  // (fortunately).
  // If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);        // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);           // load final frame new pc

}

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 1000; // Increase the buffer size when compiling for JVMCI
  }
#endif
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
  MacroAssembler* masm          = new MacroAssembler(&buffer);
  FloatRegister   Freturn0      = F0;
  Register        Greturn1      = G1;
  Register        Oreturn0      = O0;
  Register        Oreturn1      = O1;
  Register        O2UnrollBlock = O2;
  Register        L0deopt_mode  = L0;
  Register        G4deopt_mode  = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values.  Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);


#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    masm->block_comment("BEGIN implicit_exception_uncommon_trap");
    implicit_exception_uncommon_trap_offset = __ offset() - start;

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ add(O7, -8, O7);

    uncommon_trap_offset = __ offset() - start;

    // Save everything in sight.
    (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
    __ set_last_Java_frame(SP, NULL);

    __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
    __ sub(G0, 1, L1);
    __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));

    __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
    __ mov(G2_thread, O0);
    __ mov(L0deopt_mode, O2);
    __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
    __ delayed()->nop();
    oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
    __ get_thread();
    __ add(O7, 8, O7);
    __ reset_last_Java_frame();

    __ ba(after_fetch_unroll_info_call);
    __ delayed()->nop(); // Delay slot
    masm->block_comment("END implicit_exception_uncommon_trap");
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server).  O0 contains the exception oop and O7 contains the original
  // exception pc.  So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  // Opens a new stack frame
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif
  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ mov(L0deopt_mode, O1);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif
  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0, Oreturn0->after_save());
  __ mov(Oreturn1, Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}
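// For orientation (editor's sketch, not code from this file): the offsets
// recorded above become the blob's entry points. Assuming the usual
// DeoptimizationBlob accessors in codeBlob.hpp, callers reach the code
// roughly via:
//
//   address deopt_entry     = _deopt_blob->unpack();                        // normal deopt
//   address exception_entry = _deopt_blob->unpack_with_exception();         // oop in O0, pc in O7
//   address tls_entry       = _deopt_blob->unpack_with_exception_in_tls();  // oop already in TLS
//   address reexec_entry    = _deopt_blob->unpack_with_reexecution();       // reexecute the bytecode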
#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  Register O2UnrollBlock = O2;
  Register O2klass_index = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

#ifdef ASSERT
  { Label L;
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//
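// In outline (editor's sketch in C-like pseudocode; not the generated code):
//
//   handler_blob(thread):                        // O7 = resume pc (unless POLL_AT_RETURN)
//     if (!cause_return)
//       O7 = thread->saved_exception_pc() - frame::pc_return_offset;
//     save_live_registers();
//     call_ptr(thread);                          // oopmap recorded at this call
//     restore_live_registers();
//     if (thread->has_pending_exception())
//       goto StubRoutines::forward_exception_entry();
//     return;                                    // retl, back into the nmethod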
// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("handler_blob", 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();
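  // For reference (editor's sketch; the real code lives in the platform stub
  // generator): forward_exception_entry, reached by the tail call above,
  // behaves roughly like
  //
  //   Oexception = thread->pending_exception();
  //   thread->set_pending_exception(NULL);
  //   handler = SharedRuntime::exception_handler_for_return_address(thread, O7);
  //   jump handler;                       // O7 still names the issuing pc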

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer(name, 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call site.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump, overwrite G3 which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();
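  // Net effect of the stub just generated (editor's sketch; destination is
  // one of the SharedRuntime::resolve_*_call_C style entry points):
  //
  //   save_live_registers();
  //   callee = destination(thread);        // resolves and patches the call site
  //   G5_method = thread->vm_result_2();   // Method*, expected by the c2i adapter
  //   G3 = callee;                         // entry point to jump to
  //   restore_live_registers();            // reloads G5/G3 from the save area
  //   jump G3;                             // all argument registers untouched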

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob; the RuntimeStub frame size is given in words
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
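// For context (editor's sketch based on the shared SharedRuntime::generate_stubs
// in sharedRuntime.cpp): the resolve blobs above are created once at startup,
// roughly as
//
//   _wrong_method_blob             = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),         "wrong_method_stub");
//   _ic_miss_blob                  = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
//   _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),  "resolve_opt_virtual_call");
//   _resolve_virtual_call_blob     = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),      "resolve_virtual_call");
//   _resolve_static_call_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),       "resolve_static_call");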