/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers.  This is the G-regs, F-regs, and I/L/O.
  // The O-regs are problematic.  In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities.  A window save will
  // cut the heads off of the registers.  We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the O-regs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers, which requires that we save them
  // before the window save (as they then become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
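  // Added illustration of the problem described above: on a 32-bit build a
  // window spill writes only 32 bits per %i/%l/%o register, so a 64-bit
  // value live in %o0 loses its upper half once a 'save' turns %o0 into the
  // callee's %i0 and an interrupt spills the window.  That is why
  // save_live_registers() below parks the O-regs in TLS before issuing the
  // 'save'.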
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8-byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result register needs to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.

  // Always make the frame size 16-byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)), not bytes or words.
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);
#ifndef _LP64
  // Reload the 64-bit O-regs.  Although they are now I-regs we load them
  // into the O-regs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr(G5);
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i += 2) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee-saved both halves of each double register (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i += 2) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit O-regs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // The 32-bit build returns longs in G1.
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit O-regs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions.  Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}
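// Worked example for reg2offset (illustrative numbers only): for a value in
// VMReg stack slot 2 the byte offset from the window top is
// (2 + out_preserve_stack_slots()) * 4, e.g. 72 if the window reserves 16
// slots.  reg64_to_VMRegPair adapts a 64-bit register similarly: set2(r) on
// 64-bit builds, and set_pair(r->successor(), r) as hi/lo halves on 32-bit.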
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities.  Values less than VMRegImpl::stack0
// are registers; those above refer to 4-byte stack slots.  All stack slots are
// based off of the window top.  VMRegImpl::stack0 refers to the first slot past
// the 16-word window, and VMRegImpl::stack0+1 refers to the memory word 4 bytes
// higher.  Register values 0-63 (up to RegisterImpl::number_of_registers) are
// the 64-bit integer registers.  Values 64-95 are the (32-bit only) float
// registers.  Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers.  For example,
// there is an O0-low and an O0-high.  Essentially, all int register numbers
// are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's
// unused (a 32-bit value was passed).  If both are VMRegImpl::Bad(), it means
// no value was passed (used as a placeholder for the other half of longs and
// doubles in the 64-bit build).  regs[].second() is either VMRegImpl::Bad()
// or regs[].first()+1 (regs[].first() may be misaligned in the C calling
// convention).  SPARC never passes a value in regs[].second() but not
// regs[].first() (i.e. regs[].first() == VMRegImpl::Bad() &&
// regs[].second() != VMRegImpl::Bad()), nor unrelated values in the same
// VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (they cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC, always put longs on the stack to keep the pressure
      // off the integer argument registers; they should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();  // Halves of longs & doubles
      break;

    default:
      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
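// Worked example (illustrative, 64-bit build, incoming args): for signature
// (int, long, double), sig_bt is {T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID}
// and the loop above assigns:
//   T_INT    -> I0       (set1: one register, low half)
//   T_LONG   -> I1       (set2: a full 64-bit register; the trailing T_VOID
//                         half is set_bad())
//   T_DOUBLE -> F0:F1    (set2: an aligned even/odd float pair)
// No stack slots are used, so the function returns 0.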
// Helper class, mostly to avoid passing masm everywhere and to handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the (simm13) displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live G-regs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they
  // can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // Can be very far once the blob has been relocated.
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));         // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));    // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}
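// Note on store_c2i_long (added summary, hedged): on a 64-bit build an
// interpreter long occupies two stack elements, but the single 64-bit stx
// above targets only next_arg_slot(st_off), the lower-addressed element;
// the other element is left unused.  On 32-bit builds the value is split
// into two 32-bit stw's instead.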
void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However, we will run interpreted if we come thru here.  The next pass
  // thru the call site will run compiled.  If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles.  If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually, if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area.  We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in the varargs area needed by the interpreter.  Round
  // up to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}
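// Sketch of range_check's logic (added): it branches to L_ok iff
// code_start < pc_reg < code_end, with code_end recomputed in temp_reg as
// code_start + pointer_delta(code_end, code_start, 1); otherwise it falls
// through at L_fail.  gen_i2c_adapter uses it below, under
// VerifyAdapterCalls, to check that O7 returns into the interpreter or a
// known stub.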
void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i.  Azul allowed this but we do not.  If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  //        |              |
  //        :  java stack  :
  //        |              |
  //        +--------------+ <--- start of outgoing args
  //        | receiver     |   |
  //        : rest of args :   |---size is java-arg-words
  //        |              |   |
  //        +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  //        |              |   |
  //        :    unused    :   |---Space for max Java stack, plus stack alignment
  //        |              |   |
  //        +--------------+ <--- SP + 16*wordsize
  //        |              |
  //        :    window    :
  //        |              |
  //        +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  //        |              |
  //        :  java stack  :
  //        |              |
  //        +--------------+ <--- start of outgoing args
  //        | pad, align   |   |
  //        +--------------+   |
  //        | ints, longs, |   |
  //        |    floats,   |   |---Outgoing stack args.
  //        :    doubles   :   |   First few args in registers.
  //        |              |   |
  //        +--------------+ <--- SP' + 16*wordsize
  //        |              |
  //        :    window    :
  //        |              |
  //        +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
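  // Worked example for the resize below (illustrative): with
  // comp_args_on_stack == 5 slots on a 64-bit build, 5*4 = 20 bytes rounds
  // up to 24 bytes = 3 words, and round_to(3, 2) pads that to 4 words, so
  // SP drops by 32 bytes.  The padding keeps the compiled frame 2-word
  // (16-byte) aligned.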
  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in units of wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {      // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset.
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here.  If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code.  Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible.  So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}
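// Added usage sketch (restates the code below): generate_i2c2i_adapters
// emits three entry points into one buffer: the i2c entry, then the
// c2i_unverified_entry (an inline-cache check that falls into the c2i path
// via L_skip_fixup), then the verified c2i entry.  They are registered as a
// single AdapterHandlerEntry keyed by the signature fingerprint.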
// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper(int i) {
  // Bias any stack-based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons.  We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi-reserved area.  C2 thinks of that
  // abi area as only out_preserve_stack_slots.  This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it.  So,
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the OptoRegs (which impacts VMRegs), when referencing any
  // actual stack location the C calling convention must add in this bias
  // amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls.  What a mess.  I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if (mem_parm_offset < 0) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in.
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
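// Behavior sketch for int_stk_helper (added): for i in [0, 5] it returns
// O0..O5 directly; for i >= 6 it returns a stack slot biased downward by
// out_preserve_stack_slots(), so that when the out-preserve bias is added
// back in (see reg2offset above) the value lands in the C ABI's
// memory-parameter area rather than in the register window.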
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack slots needed for the args.
  // This value does not include any abi space (like the register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.  Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                    // Count of actual args, not HALVES
  for (int i = 0; i < total_args_passed; i++, j++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      // fall-through
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      if (j < 16) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slots
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (j < 16) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1(int_stk_helper(i));
      break;
    case T_DOUBLE:
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      regs[i].set_pair(int_stk_helper(i+1), int_stk_helper(i));
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward any pending exception.  The thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native
// call, there is no exception handler.  We merely pop this frame off and
// throw the exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (SPARC ABI), even though Java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits.  So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}
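// Note on the three move helpers above (added summary): simple_move32
// always moves 32 bits (ld/st); move32_64 widens on the store side (st_ptr)
// so that an int parameter fills a full 64-bit ABI slot on V9; and move_ptr
// uses pointer-sized loads and stores on both sides.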
// An oop arg.  Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Must pass a handle; first figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr(Assembler::rc_z, L4, G0, rHandle);
#else
    __ tst(L4);
    __ movcc(Assembler::zero, false, Assembler::icc, G0, rHandle);
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack.
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr(Assembler::rc_z, rOop, G0, rHandle);
#else
    __ tst(rOop);
    __ movcc(Assembler::zero, false, Assembler::icc, G0, rHandle);
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack: the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
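        // Added note: the st/ldf pair bounces the value through a stack
        // temporary, since SPARC (before VIS3) has no direct move between
        // the integer and floating-point register files.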
// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack: the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here, else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means hi address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address not msb for stack slots
      // and lo is the "real" register for registers
      // src is in two separate locations (a register pair, or register plus stack slot)

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // So dst now has the low-order half in the correct position;
      // OR in the msw half.
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}
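// Illustrative note (editor's sketch, not part of the original code): the
// srax/sllx/or3 gymnastics above amount to splitting or merging the two
// 32-bit halves of a 64-bit value, with "hi" living at the higher address
// (the lsw). A plain-C model of the merge done in the
// dst.is_single_phys_reg() path:
//
//   // hypothetical stand-alone model:
//   #include <stdint.h>
//   static int64_t merge_halves(int32_t msw, uint32_t lsw) {
//     return ((int64_t)msw << 32) | (int64_t)lsw;   // sllx + or3
//   }
//   static int32_t extract_msw(int64_t v) {
//     return (int32_t)(v >> 32);                    // srax reg, 32
//   }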
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  //  1: a single physical register
  //  2: two physical registers (v8)
  //  3: a physical reg [lo] and a stack slot [hi] (v8)
  //  4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?)

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack: the easiest of the bunch.
      // There ought to be a way to use ldd/std here when the alignment is ok.
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
        if (reg2offset(dst.first()) & 0x7) {
          // Not 8-byte aligned: store as a pair of singles.
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}
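// Illustrative note (editor's sketch, not part of the original code): the
// (reg2offset(dst.first()) & 0x7) test above chooses between one 8-byte FP
// store and two 4-byte stores, because SPARC double-word accesses fault on
// misaligned addresses. Modelled in isolation:
//
//   // hypothetical stand-alone model:
//   static bool can_use_double_access(int byte_offset) {
//     return (byte_offset & 0x7) == 0;  // only 8-byte-aligned slots qualify
//   }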
// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below.
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy.
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  if (map != NULL) {
    // Fill in the map
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        if (in_regs[i].first()->is_stack()) {
          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
        } else if (in_regs[i].first()->is_Register()) {
          map->set_oop(in_regs[i].first());
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  // Save or restore double word values
  int handle_index = 0;
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
      const Register reg = in_regs[i].first()->as_Register();
      if (reg->is_global()) {
        handle_index += 2;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ stx(reg, SP, offset + STACK_BIAS);
        } else {
          __ ldx(SP, offset + STACK_BIAS, reg);
        }
      }
    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
  // Save floats
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
      handle_index++;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
}
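// Illustrative note (editor's sketch): the helper above is driven entirely
// by its 'map' argument, which gives the save/reload call pattern used
// around the blocking call in check_needs_gc_for_critical_native() below:
//
//   OopMap* map = new OopMap(stack_slots * 2, 0);
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, map,  in_regs, in_sig_bt); // save + describe oops
//   ... call that may block and trigger a GC ...
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, NULL, in_regs, in_sig_bt); // reload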
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  AddressLiteral sync_state(GC_locker::needs_gc_address());
  __ load_bool_contents(sync_state, G3_scratch);
  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
  __ delayed()->nop();

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mov(G2_thread, L7_thread_cache);

  __ set_last_Java_frame(SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);
  oop_maps->add_gc_map( __ offset(), map);

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reset_last_Java_frame();

  // Reload all the register arguments
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        if (reg->is_global()) {
          __ mov(G0, reg);
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  // Pass the length, ptr pair
  Label is_null, done;
  if (reg.first()->is_stack()) {
    VMRegPair tmp = reg64_to_VMRegPair(L2);
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ cmp(reg.first()->as_Register(), G0);
  __ brx(Assembler::equal, false, Assembler::pt, is_null);
  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
  __ ba_short(done);
  __ bind(is_null);
  // Pass zeros
  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
  __ bind(done);
}
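// Illustrative note (editor's sketch, not part of the original code): from
// the native function's point of view, unpack_array_argument() turns each
// Java array parameter into a (length, body-pointer) pair in that order
// (the out signature emits T_INT then T_ADDRESS). For a hypothetical
// int[] parameter the callee would see roughly
//
//   // hypothetical C-level view, names assumed:
//   // ... foo(jint a_len, jint* a_body) ...
//
// with a NULL array passed as (0, NULL), matching the is_null path above.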
static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = G5_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
          __ ld_ptr(SP, ld_off, temp_reg);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = G5_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, member_reg);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = G3_scratch;  // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions: it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native
//    transition back to thread_in_Java
//    return to caller
//
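// Illustrative note (editor's sketch, not part of the original code): a
// critical native is located by name using the JavaCritical_ prefix, so
// for a hypothetical method pkg.Cls.sum(byte[]) the VM would look for an
// entry point of roughly this shape:
//
//   // no JNIEnv*, no jclass/jobject, arrays split into (length, body):
//   // jint JNICALL JavaCritical_pkg_Cls_sum(jint len, jbyte* body);
//
// The wrapper generated below supplies the GC_locker interaction that
// makes handing out raw array bodies safe.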
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver = O0;
    Register result   = O0;
    Register header   = G3_scratch;
    Register hash     = G3_scratch; // overwrite header value with hash value
    Register mask     = G1;         // to get hash field from header

    // Read the header and build a mask to get its hash field.
    // Give up if the object is not unlocked.
    // We depend on hash_mask being at most 32 bits and avoid the use of
    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
    // vm: see markOop.hpp.
    __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
    __ sethi(markOopDesc::hash_mask, mask);
    __ btst(markOopDesc::unlocked_value, header);
    __ br(Assembler::zero, false, Assembler::pn, slowCase);
    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ delayed()->nop();
      __ btst(markOopDesc::biased_lock_bit_in_place, header);
      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
    }
    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);

    // Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
    __ srlx(header, markOopDesc::hash_shift, hash);
#else
    __ srl(header, markOopDesc::hash_shift, hash);
#endif
    __ andcc(hash, mask, hash);
    __ br(Assembler::equal, false, Assembler::pn, slowCase);
    __ delayed()->nop();

    // leaf return.
    __ retl();
    __ delayed()->mov(hash, result);
    __ bind(slowCase);
  }
#endif // COMPILER1


  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  int total_save_slots = 6 * VMRegImpl::slots_per_word;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // These have to be saved and restored across the safepoint
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE;    break;
            case 'C': in_elem_bt[i] = T_CHAR;    break;
            case 'D': in_elem_bt[i] = T_DOUBLE;  break;
            case 'F': in_elem_bt[i] = T_FLOAT;   break;
            case 'I': in_elem_bt[i] = T_INT;     break;
            case 'J': in_elem_bt[i] = T_LONG;    break;
            case 'S': in_elem_bt[i] = T_SHORT;   break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }
  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the first six register arguments). It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }
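  // Illustrative note (editor's sketch): total_save_slots counts one 32-bit
  // slot per float and two per double (or long held in a G register). For
  // an assumed signature (double, long-in-G-register, float) that gives
  //   double_slots = 2, single_slots = 1
  //   total_save_slots = 2 * 2 + 1 = 5 slots (20 bytes)
  // before the whole frame is rounded up for alignment further down.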
  // Compute framesize for the wrapper. We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number, not
  // the outbound number.)
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = round_to(stack_slots, 2);
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place to save return value or as a temporary for any gpr -> fpr moves
  stack_slots += 2;

  // Ok. The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      | vararg area         |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute actual number of stack words we need, rounding to make
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  int frame_complete = ((intptr_t)__ pc()) - start;

  __ verify_thread();

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  // the class mirror instead of a receiver.  This pretty much guarantees that
  // register layout will not match.  We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
  // Because we have a new window and the argument registers are completely
  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  // here.
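  // Illustrative note (editor's sketch): after the save() above, Java
  // arguments that arrived in the caller's O0..O5 are visible here as
  // I0..I5, while the C arguments are assembled in the fresh O registers.
  // For an assumed non-static, non-critical native:
  //
  //   JNIEnv*    ->       O0   (added below)
  //   receiver    : I0 -> O1   (handlized oop)
  //   1st int arg : I1 -> O2   ... and so on.
  //
  // Hence the source and destination register sets never overlap.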
  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  // Record sp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // We move the arguments backward because the floating point registers
  // destination will always be to a register with a greater or equal register
  // number or the stack.

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
          c_arg--;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into O1.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);

    // Now handlize the static class mirror in O1.  It's known not-null.
    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ add(SP, klass_offset + STACK_BIAS, O1);
  }


  const Register L6_handle = L6;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    __ mov(O1, L6_handle);
  }
  // We have all of the arguments setup at this point. We MUST NOT touch any
  // Oregs except O6/O7. So if we must call out we must push a new frame. We
  // immediately push a new frame and flush the windows.
#ifdef _LP64
  intptr_t thepc = (intptr_t) __ pc();
  {
    address here = __ pc();
    // Call the next instruction
    __ call(here + 8, relocInfo::none);
    __ delayed()->nop();
  }
#else
  intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */

  // We use the same pc/oopMap repeatedly when we call out
  oop_maps->add_gc_map(thepc - start, map);

  // O7 now has the pc loaded that we will use when we finally call to native.

  // Save thread in L7; it crosses a bunch of VM calls below.
  // Don't use save_thread because it smashes G2 and we merely
  // want to save a copy.
  __ mov(G2_thread, L7_thread_cache);


  // If we create an inner frame, once is plenty; when we create it we must
  // also save G2_thread.
  bool inner_frame_created = false;

  // dtrace method entry support
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // We are in the jni frame unless saved_frame is true, in which case
  // we are in one frame deeper (the "inner" frame). If we are in the
  // "inner" frame the args are in the Iregs; in the jni frame
  // they are in the Oregs.
  // If we ever need to go to the VM (for locking, jvmti) then
  // we will always be in the "inner" frame.

  // Lock a synchronized method
  int lock_offset = -1;         // Set if locked
  if (method->is_synchronized()) {
    Register Roop = O1;
    const Register L3_box = L3;

    create_inner_frame(masm, &inner_frame_created);

    __ ld_ptr(I1, 0, O1); // get the oop out of the handle
    Label done;

    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
    __ add(FP, lock_offset+STACK_BIAS, L3_box);
#ifdef ASSERT
    if (UseBiasedLocking) {
      // making the box point to itself will make it clear it went unused
      // but also be obviously invalid
      __ st_ptr(L3_box, L3_box, 0);
    }
#endif // ASSERT
    //
    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
    //
    __ compiler_lock_object(Roop, L1, L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);


    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(Roop, O0);            // Need oop in O0
    __ mov(L3_box, O1);

    // Record last_Java_sp, in case the VM code releases the JVM lock.

    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flushw();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago; now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

#ifdef _LP64
  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
#else
  __ call(native_func, relocInfo::runtime_call_type);
#endif
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:    break;        // Nothing to do!
  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1.
  case T_LONG:
#ifndef _LP64
                  __ mov(O1, I1);
#endif
                  // Fall thru
  case T_OBJECT:                // Really a handle
  case T_ARRAY:
  case T_INT:
                  __ mov(O0, I0);
                  break;        // Cannot de-handlize until after reclaiming jvm_lock
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
  case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
  default:
    ShouldNotReachHere();
  }
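  // Illustrative note (editor's sketch, not part of the original code): the
  // T_BOOLEAN subcc/addc pair above is a branch-free "!= 0" test, and the
  // shift pairs are plain sign/zero extensions. In C terms:
  //
  //   // hypothetical stand-alone model:
  //   #include <stdint.h>
  //   static int32_t norm_bool (int32_t x) { return x != 0; }       // subcc/addc
  //   static int32_t norm_byte (int32_t x) { return (int8_t)x;   }  // sll 24, sra 24
  //   static int32_t norm_char (int32_t x) { return (uint16_t)x; }  // sll 16, srl 16
  //   static int32_t norm_short(int32_t x) { return (int16_t)x;  }  // sll 16, sra 16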

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if(os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }
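  // Illustrative note (editor's sketch): the ordering constraint being
  // enforced above is store(thread_state) *before* load(sync_state).
  // Without the StoreLoad barrier (or serialization page) the VM thread
  // could observe stale state, as in this interleaving:
  //
  //   // Java thread                        // VM thread
  //   st thread_state = native_trans
  //   ...store parked in store buffer...    sync_state = synchronizing
  //   ld sync_state -> _not_synchronized (stale!) -> thread escapes safepoint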
  // Thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  __ bind(after_transition);

  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.  Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation.  Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM.  Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);              // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set.  The forward_exception routine expects to see the
    // exception in pending_exception and not in a register.  Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label L;
    __ addcc(G0, I0, G0);
    __ brx(Assembler::notZero, true, Assembler::pt, L);
    __ delayed()->ld_ptr(I0, 0, I0);
    __ mov(G0, I0);
    __ bind(L);
    __ verify_oop(I0);
  }

  if (!is_critical_native) {
    // reset handle block
    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
    __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());

    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
    check_forward_pending_exception(masm, G3_scratch);
  }


  // Return

#ifndef _LP64
  if (ret_type == T_LONG) {

    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
    __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);          // OR 64 bits into G1
  }
#endif

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }
  return nm;

}

#ifdef HAVE_DTRACE_H
// ---------------------------------------------------------------------------
// Generate a dtrace nmethod for a given signature. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// abi and then leaves nops at the position you would expect to call a native
// function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trace will cause a notification
// to dtrace.
//
// The probes are only able to take primitive types and java/lang/String as
// arguments.  No other java types are allowed. Strings are converted to utf8
// strings so that from dtrace's point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings (256 chars per string in the signature),
// so any java string larger than this is truncated.

static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
static bool offsets_initialized = false;

nmethod *SharedRuntime::generate_dtrace_nmethod(
    MacroAssembler *masm, methodHandle method) {


  // generate_dtrace_nmethod is guarded by a mutex so we are sure to
  // be single threaded in this method.
  assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");

  // Fill in the signature array, for the calling-convention call.
  int total_args_passed = method->size_of_parameters();

  BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  VMRegPair  *in_regs  = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  // The signature we are going to use for the trap that dtrace will see:
  // java/lang/String is converted, "this" is dropped, and any other object
  // is converted to NULL.  (A one-slot java/lang/Long object reference
  // is converted to a two-slot long, which is why we double the allocation.)
  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);

  int i = 0;
  int total_strings = 0;
  int first_arg_to_pass = 0;
  int total_c_args = 0;

  // Skip the receiver as dtrace doesn't want to see it
  if( !method->is_static() ) {
    in_sig_bt[i++] = T_OBJECT;
    first_arg_to_pass = 1;
  }

  SignatureStream ss(method->signature());
  for ( ; !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    in_sig_bt[i++] = bt;  // Collect remaining bits of signature
    out_sig_bt[total_c_args++] = bt;
    if( bt == T_OBJECT) {
      Symbol* s = ss.as_symbol_or_null();
      if (s == vmSymbols::java_lang_String()) {
        total_strings++;
        out_sig_bt[total_c_args-1] = T_ADDRESS;
      } else if (s == vmSymbols::java_lang_Boolean() ||
                 s == vmSymbols::java_lang_Byte()) {
        out_sig_bt[total_c_args-1] = T_BYTE;
      } else if (s == vmSymbols::java_lang_Character() ||
                 s == vmSymbols::java_lang_Short()) {
        out_sig_bt[total_c_args-1] = T_SHORT;
      } else if (s == vmSymbols::java_lang_Integer() ||
                 s == vmSymbols::java_lang_Float()) {
        out_sig_bt[total_c_args-1] = T_INT;
      } else if (s == vmSymbols::java_lang_Long() ||
                 s == vmSymbols::java_lang_Double()) {
        out_sig_bt[total_c_args-1] = T_LONG;
        out_sig_bt[total_c_args++] = T_VOID;
      }
    } else if ( bt == T_LONG || bt == T_DOUBLE ) {
      in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
      // We convert double to long
      out_sig_bt[total_c_args-1] = T_LONG;
      out_sig_bt[total_c_args++] = T_VOID;
    } else if ( bt == T_FLOAT) {
      // We convert float to int
      out_sig_bt[total_c_args-1] = T_INT;
    }
  }

  assert(i == total_args_passed, "validly parsed signature");

  // Now get the compiled-Java layout as input arguments
  int comp_args_on_stack;
  comp_args_on_stack = SharedRuntime::java_calling_convention(
      in_sig_bt, in_regs, total_args_passed, false);

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // a native (non-JNI) function would expect them. To figure out
  // where they go we convert the java signature to a C signature and remove
  // T_VOID for any long/double we might have received.


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the first six register arguments). It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
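  // Illustrative note (editor's sketch): for an assumed instance method
  // void log(String s, double d, int n) the conversion loop above produces
  //
  //   in_sig_bt : T_OBJECT(this), T_OBJECT, T_DOUBLE, T_VOID, T_INT
  //   out_sig_bt:                 T_ADDRESS, T_LONG,  T_VOID, T_INT
  //
  // i.e. the receiver is dropped, the String becomes a C pointer (utf8),
  // and the double is widened to a two-slot long.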
  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Plus a temp for possible conversion of float/double/long register args

  int conversion_temp = stack_slots;
  stack_slots += 2;


  // Now space for the string(s) we must convert

  int string_locs = stack_slots;
  stack_slots += total_strings *
                   (max_dtrace_string_size / VMRegImpl::stack_slot_size);

  // Ok. The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | string[n]           |
  //      |---------------------| <- string_locs[n]
  //      | string[n-1]         |
  //      |---------------------| <- string_locs[n-1]
  //      | ...                 |
  //      | ...                 |
  //      |---------------------| <- string_locs[1]
  //      | string[0]           |
  //      |---------------------| <- string_locs[0]
  //      | temp                |
  //      |---------------------| <- conversion_temp
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute actual number of stack words we need, rounding to make
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  // Frame is now complete as far as size and linkage go.

  int frame_complete = ((intptr_t)__ pc()) - start;

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  VMRegPair zero;
  const Register g0 = G0; // without this we get a compiler warning (why??)
  intptr_t start = (intptr_t)__ pc();

  // First thing, make an ic check to see if we should even be here

  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  // Frame is now complete as far as size and linkage go.

  int frame_complete = ((intptr_t)__ pc()) - start;

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for (int r = 0; r < RegisterImpl::number_of_registers; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0; f < FloatRegisterImpl::number_of_registers; f++) {
    freg_destroyed[f] = false;
  }
#endif /* ASSERT */

  VMRegPair zero;
  const Register g0 = G0; // without this we get a compiler warning (why??)
  zero.set2(g0->as_VMReg());

  int c_arg, j_arg;

  Register conversion_off = noreg;

  for (j_arg = first_arg_to_pass, c_arg = 0;
       j_arg < total_args_passed; j_arg++, c_arg++) {

    VMRegPair src = in_regs[j_arg];
    VMRegPair dst = out_regs[c_arg];

#ifdef ASSERT
    if (src.first()->is_Register()) {
      assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
    } else if (src.first()->is_FloatRegister()) {
      assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
             FloatRegisterImpl::S)], "ack!");
    }
    if (dst.first()->is_Register()) {
      reg_destroyed[dst.first()->as_Register()->encoding()] = true;
    } else if (dst.first()->is_FloatRegister()) {
      freg_destroyed[dst.first()->as_FloatRegister()->encoding(
                     FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[j_arg]) {
      case T_ARRAY:
      case T_OBJECT:
        {
          if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
              out_sig_bt[c_arg] == T_INT  || out_sig_bt[c_arg] == T_LONG) {
            // need to unbox a one-slot value
            Register in_reg = L0;
            Register tmp    = L2;
            if (src.first()->is_reg()) {
              in_reg = src.first()->as_Register();
            } else {
              assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
                     "must be");
              __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
            }
            // If the final destination is an acceptable register
            if (dst.first()->is_reg()) {
              if (dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG) {
                tmp = dst.first()->as_Register();
              }
            }

            Label skipUnbox;
            if (wordSize == 4 && out_sig_bt[c_arg] == T_LONG) {
              __ mov(G0, tmp->successor());
            }
            __ br_null(in_reg, true, Assembler::pn, skipUnbox);
            __ delayed()->mov(G0, tmp);

            BasicType bt = out_sig_bt[c_arg];
            int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
            switch (bt) {
              case T_BYTE:
                __ ldub(in_reg, box_offset, tmp); break;
              case T_SHORT:
                __ lduh(in_reg, box_offset, tmp); break;
              case T_INT:
                __ ld(in_reg, box_offset, tmp); break;
              case T_LONG:
                __ ld_long(in_reg, box_offset, tmp); break;
              default: ShouldNotReachHere();
            }

            __ bind(skipUnbox);
            // If tmp wasn't the final destination, copy to the final destination
            if (tmp == L2) {
              VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
              if (out_sig_bt[c_arg] == T_LONG) {
                long_move(masm, tmp_as_VM, dst);
              } else {
                move32_64(masm, tmp_as_VM, out_regs[c_arg]);
              }
            }
            if (out_sig_bt[c_arg] == T_LONG) {
              assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
              ++c_arg; // move over the T_VOID to keep the loop indices in sync
            }
          } else if (out_sig_bt[c_arg] == T_ADDRESS) {
            Register s =
                src.first()->is_reg() ? src.first()->as_Register() : L2;
            Register d =
                dst.first()->is_reg() ? dst.first()->as_Register() : L2;

            // We store the oop now so that the conversion pass can reach it
            // while still in the inner frame. This will be the only store if
            // the oop is NULL.
            if (s != L2) {
              // src is a register
              if (d != L2) {
                // dst is a register
                __ mov(s, d);
              } else {
                assert(Assembler::is_simm13(reg2offset(dst.first()) +
                       STACK_BIAS), "must be");
                __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
              }
            } else {
              // src is not a register
              assert(Assembler::is_simm13(reg2offset(src.first()) +
                     STACK_BIAS), "must be");
              __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
              if (d == L2) {
                assert(Assembler::is_simm13(reg2offset(dst.first()) +
                       STACK_BIAS), "must be");
                __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
              }
            }
          } else if (out_sig_bt[c_arg] != T_VOID) {
            // Convert the arg to NULL
            if (dst.first()->is_reg()) {
              __ mov(G0, dst.first()->as_Register());
            } else {
              assert(Assembler::is_simm13(reg2offset(dst.first()) +
                     STACK_BIAS), "must be");
              __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
            }
          }
        }
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          move32_64(masm, src, dst);
        } else {
          if (dst.first()->is_reg()) {
            // freg -> reg
            int off =
                STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
            Register d = dst.first()->as_Register();
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
              __ ld(SP, off, d);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
              __ ld(SP, conversion_off, d);
            }
          } else {
            // freg -> mem
            int off = STACK_BIAS + reg2offset(dst.first());
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
            }
          }
        }
        break;
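      // For illustration: there is no direct fpr->gpr move here, so a float
      // arg bound for an integer register takes a round trip through the
      // conversion_temp stack slot, roughly
      //
      //   stf %fN, [SP + off]   ! spill the float bits
      //   ld  [SP + off], %oM   ! reload them into the integer arg register
      //
      // where off = STACK_BIAS + conversion_temp * stack_slot_size,
      // materialized once into L6 if it doesn't fit a simm13 displacement.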
      case T_DOUBLE:
        assert(j_arg + 1 < total_args_passed &&
               in_sig_bt[j_arg + 1] == T_VOID &&
               out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          long_move(masm, src, dst);
        } else {
          Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;

          // The destination could be an odd reg on 32bit, in which case
          // we can't load directly into it.
          if (!d->is_even() && wordSize == 4) {
            d = L2;
          }
          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, off);
            __ ld_long(SP, off, d);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, conversion_off);
            __ ld_long(SP, conversion_off, d);
          }
          if (d == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        }
        break;

      case T_LONG:
        // 32bit can't do a split move of something like g1 -> O0, O1
        // so use a memory temp
        if (src.is_single_phys_reg() && wordSize == 4) {
          Register tmp = L2;
          if (dst.first()->is_reg() &&
              (wordSize == 8 || dst.first()->as_Register()->is_even())) {
            tmp = dst.first()->as_Register();
          }

          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stx(src.first()->as_Register(), SP, off);
            __ ld_long(SP, off, tmp);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stx(src.first()->as_Register(), SP, conversion_off);
            __ ld_long(SP, conversion_off, tmp);
          }

          if (tmp == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        } else {
          long_move(masm, src, dst);
        }
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
        // fall through to the default move in product builds

      default:
        move32_64(masm, src, dst);
    }
  }


  // If we have any strings we must store any register-based arg to the
  // stack. This includes any still-live xmm registers too.

  if (total_strings > 0) {

    // protect all the arg registers
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    const Register L2_string_off = L2;

    // Get the first string offset
    __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);

    for (c_arg = 0; c_arg < total_c_args; c_arg++) {
      if (out_sig_bt[c_arg] == T_ADDRESS) {

        VMRegPair dst = out_regs[c_arg];
        const Register d = dst.first()->is_reg() ?
            dst.first()->as_Register()->after_save() : noreg;

        // It's a string: the oop was already copied to the out arg
        // position
        if (d != noreg) {
          __ mov(d, O0);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
        }
        Label skip;

        __ br_null(O0, false, Assembler::pn, skip);
        __ delayed()->add(FP, L2_string_off, O1);

        if (d != noreg) {
          __ mov(O1, d);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
        }

        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
                relocInfo::runtime_call_type);
        __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);

        __ bind(skip);

      }

    }
    __ mov(L7_thread_cache, G2_thread);
    __ restore();

  }
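  // For illustration: after the loop above, each T_ADDRESS out-arg holds
  // either NULL (the br_null skips the conversion) or FP + string_off, the
  // stack buffer that the call to SharedRuntime::get_utf (O0 = String oop,
  // O1 = destination buffer) fills in; the call's delay slot bumps
  // string_off by max_dtrace_string_size for the next string.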
  // OK, now we are done. We need to place the nop that dtrace wants so it
  // can patch in the trap.

  int patch_offset = ((intptr_t)__ pc()) - start;

  __ nop();


  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_dtrace_nmethod(
      method, masm->code(), vep_offset, patch_offset, frame_complete,
      stack_slots / VMRegImpl::slots_per_word);
  return nm;

}

#endif // HAVE_DTRACE_H

// This function returns the adjustment size (in number of words) applied to
// a c2i adapter activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return round_to(diff, WordsPerLong);
}
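// For illustration (made-up values): callee_parameters = 2 and
// callee_locals = 5 give diff = 3 * Interpreter::stackElementWords; with one
// word per stack element that is 3 words, which round_to pads up to a
// multiple of WordsPerLong (4 if WordsPerLong == 2) so the adjusted area
// stays long-aligned.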
// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register G3pcs         = G3_scratch; // Array of new pcs (input)
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;         // Array of frame sizes (input)
  Register O4array_size  = O4;         // number of frames (input)
  Register O7frame_size  = O7;         // current frame size (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);             // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);      // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5; it could hold a valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register G3pcs         = G3_scratch;
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;
  Register O4array_size  = O4;
  Label loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of stack.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust the old interpreter frame to make space for the new frame's extra
  // java locals.
  //
  // We capture the original sp for the transition frame only because it is
  // needed in order to properly calculate interpreter_sp_adjustment. Even
  // though in real life every interpreter frame captures a savedSP, it is
  // only needed at the transition (fortunately). If we had to have it correct
  // everywhere then we would need to be told the sp_adjustment for each frame
  // we create. If the frame size array were to have twice the frame count
  // entries then we could have pairs [sp_adjustment, frame_size] for each
  // frame we create and keep up the illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);   // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);      // load final frame's new pc
}
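// For illustration (made-up numbers): unrolling two interpreter frames runs
// the loop above roughly like this:
//
//   O4array_size = 2; O3array -> { size0, size1 }; G3pcs -> { pc0, pc1, pc_final }
//   iteration 1: save(SP, -size0, SP); I7 = pc0; O4array_size = 1
//   iteration 2: save(SP, -size1, SP); I7 = pc1; O4array_size = 0, exit loop
//   afterwards:  O7 = pc_final (the pc for the frame we fall into)
//
// where each gen_new_frame call also re-homes O2UnrollBlock, O3array,
// O4array_size (and Oreturn0/Oreturn1 in the deopt case) into the new
// register window and advances the array pointers.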
//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm = new MacroAssembler(&buffer);
  FloatRegister Freturn0      = F0;
  Register      Greturn1      = G1;
  Register      Oreturn0      = O0;
  Register      Oreturn1      = O1;
  Register      O2UnrollBlock = O2;
  Register      L0deopt_mode  = L0;
  Register      G4deopt_mode  = G4_scratch;
  int           frame_size_words;
  Address       saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address       saved_Greturn1_addr(FP, -sizeof(double) - sizeof(jlong) + STACK_BIAS);
#endif
  Label         cont;

  OopMapSet* oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (taking care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location), so the
  // deoptimizing pc is now in O7. Return values are still in the expected
  // places.

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or
  // exception_blob for server). O0 contains the exception oop and O7
  // contains the original exception pc. So if we push a frame here it will
  // look to the stack walking code (fetch_unroll_info) just like a normal
  // call, and state will be extracted normally.
  // Save the exception oop in JavaThread and fall through into the
  // exception_in_tls case, since they are handled the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site. This describes all our saved volatile
  // registers.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame, taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  // 32-bit 1-register longs return longs in G1
  __ stx(Greturn1, saved_Greturn1_addr);
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
  Label not_long;
  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  __ ldd(saved_Greturn1_addr, I0);
  __ bind(not_long);
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm = new MacroAssembler(&buffer);
  Register O2UnrollBlock = O2;
  Register O2klass_index = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code.
  // The frame is deoptimized in these cases and converted into interpreter
  // frames for execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames
  //   - create a dummy "unpack_frame"
  //   - call the C routine Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into a
  // vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame, taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.
// A simple 'save' turns the %o's into %i's, and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that the frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call target.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to, assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3 which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
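// For context, a sketch of how the platform-independent side uses this stub
// generator (recalled from sharedRuntime.cpp, not part of this file): in
// SharedRuntime::generate_stubs() the resolve blobs are created roughly as
//
//   _resolve_static_call_blob =
//       generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
//                             "resolve_static_call");
//
// so `destination` above is one of the resolve_*_call_C entry points and
// `name` only labels the generated stub.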