/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires that we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result register needs to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
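// Illustrative arithmetic for the alignment trick in the enum above (the
// numbers are hypothetical, not the actual frame constants): if
// call_args_area happened to be 92, then (92 + 7) & ~7 == 96, the next
// multiple of 8. round_to(92, 8) would compute the same value, but it is not
// usable in an enum initializer because it is not a compile-time constant
// expression.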
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
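// Worked example for reg2offset (the slot counts are hypothetical, only the
// shape of the computation is being illustrated): if r is the stack slot
// with reg2stack() == 3 and out_preserve_stack_slots() returned 16 slots for
// the reserved window area, the value would live at (3 + 16) * 4 = 76 bytes
// from the (biased) stack pointer.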
static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the window
// top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers. Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers. For example, there is
// an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC put longs always on the stack to keep the pressure off
      // integer argument registers. They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
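// Example mapping for java_calling_convention (illustrative only; it follows
// the code above, assuming a 64-bit build, incoming arguments, and the Java
// signature (int, long, double), i.e.
// sig_bt = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }):
//   regs[0].set1(I0)   - the int, first integer register
//   regs[1].set2(I1)   - the long, one aligned 64-bit register
//   regs[2] bad        - placeholder for the long's other half
//   regs[3].set2(F0)   - the double, the aligned F0/F1 pair
//   regs[4] bad        - placeholder for the double's other half
// Once the SPARC_ARGS_IN_REGS_NUM integer registers or 8 float registers run
// out, values fall back to the 4-byte stack slots counted by 'slot', with
// 8-byte values first aligned to an even slot.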
// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the callers callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}
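// What range_check emits, spelled out: branch to L_fail if
// pc_reg <= code_start, otherwise branch to L_ok if
// pc_reg < code_start + length (cmp_and_brx_short performs its own compare,
// so the cmp immediately before it appears redundant but harmless). Falling
// through L_fail lets several range checks be chained, with a final stop if
// nothing matched; a return address equal to code_start itself should never
// be a valid mid-blob return, so the exclusive lower bound is fine.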
void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null_short(L0, Assembler::pt, loop);
    __ restore();
  }

  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
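// Worked example for int_stk_helper (illustrative): with
// SPARC_ARGS_IN_REGS_NUM == 6, arguments i = 0..5 map directly to O0..O5.
// For i == 6, mem_parm_offset == 0, so the result is the stack slot at
// frame::memory_parameter_word_sp_offset * VMRegImpl::slots_per_word minus
// the out_preserve bias; reg2offset() later adds out_preserve_stack_slots()
// back in, so the net offset lands in the C abi's memory parameter area.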
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64
  // The LP64 convention is the V9 convention which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
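// Example of the V9 placements above (illustrative, following the code): for
// a native signature (int, double, float), i.e.
// sig_bt = { T_INT, T_DOUBLE, T_VOID, T_FLOAT }, j runs 0, 1, 2:
//   int    at j=0 -> int_stk_helper(0)            -> O0
//   double at j=1 -> as_FloatRegister(1<<1)       -> the even/odd F2/F3 pair
//   float  at j=2 -> as_FloatRegister(1 + (2<<1)) -> F5, the odd half
// Note the T_VOID half decrements j, so halves never consume a slot.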
// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward a pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
// is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (sparc abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do float reg <-> int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}
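// Note on the gpr <-> fpr cases in float_move above: there is no direct move
// between the integer and float register files here, so the value takes a
// round trip through a scratch stack word just below the frame pointer
// (FP - 4, plus the stack bias): store from the source register file, then
// load into the destination register file.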
ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister()); 1427 } 1428 } else if (dst.first()->is_Register()) { 1429 // fpr -> gpr 1430 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS); 1431 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register()); 1432 } else { 1433 // fpr -> fpr 1434 // In theory these overlap but the ordering is such that this is likely a nop 1435 if ( src.first() != dst.first()) { 1436 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister()); 1437 } 1438 } 1439 } 1440 } 1441 1442 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1443 VMRegPair src_lo(src.first()); 1444 VMRegPair src_hi(src.second()); 1445 VMRegPair dst_lo(dst.first()); 1446 VMRegPair dst_hi(dst.second()); 1447 simple_move32(masm, src_lo, dst_lo); 1448 simple_move32(masm, src_hi, dst_hi); 1449 } 1450 1451 // A long move 1452 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1453 1454 // Do the simple ones here else do two int moves 1455 if (src.is_single_phys_reg() ) { 1456 if (dst.is_single_phys_reg()) { 1457 __ mov(src.first()->as_Register(), dst.first()->as_Register()); 1458 } else { 1459 // split src into two separate registers 1460 // Remember hi means hi address or lsw on sparc 1461 // Move msw to lsw 1462 if (dst.second()->is_reg()) { 1463 // MSW -> MSW 1464 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register()); 1465 // Now LSW -> LSW 1466 // this will only move lo -> lo and ignore hi 1467 VMRegPair split(dst.second()); 1468 simple_move32(masm, src, split); 1469 } else { 1470 VMRegPair split(src.first(), L4->as_VMReg()); 1471 // MSW -> MSW (lo ie. first word) 1472 __ srax(src.first()->as_Register(), 32, L4); 1473 split_long_move(masm, split, dst); 1474 } 1475 } 1476 } else if (dst.is_single_phys_reg()) { 1477 if (src.is_adjacent_aligned_on_stack(2)) { 1478 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1479 } else { 1480 // dst is a single reg. 1481 // Remember lo is low address not msb for stack slots 1482 // and lo is the "real" register for registers 1483 // src is 1484 1485 VMRegPair split; 1486 1487 if (src.first()->is_reg()) { 1488 // src.lo (msw) is a reg, src.hi is stk/reg 1489 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg] 1490 split.set_pair(dst.first(), src.first()); 1491 } else { 1492 // msw is stack move to L5 1493 // lsw is stack move to dst.lo (real reg) 1494 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5 1495 split.set_pair(dst.first(), L5->as_VMReg()); 1496 } 1497 1498 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg) 1499 // msw -> src.lo/L5, lsw -> dst.lo 1500 split_long_move(masm, src, split); 1501 1502 // So dst now has the low order correct position the 1503 // msw half 1504 __ sllx(split.first()->as_Register(), 32, L5); 1505 1506 const Register d = dst.first()->as_Register(); 1507 __ or3(L5, d, d); 1508 } 1509 } else { 1510 // For LP64 we can probably do better. 
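    // Illustrative fallback (an assumed example, not new behavior): on
    // 32-bit, a long held in two stack slots moving to two other stack
    // slots ends up here and is handled as two independent 32-bit moves
    // by split_long_move/simple_move32 above.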
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that, like long_move, a VMRegPair might be
  //  1: a single physical register
  //  2: two physical registers (v8)
  //  3: a physical reg [lo] and a stack slot [hi] (v8)
  //  4: two stack slots

  // Since src always comes from the Java calling convention, we know that the
  // src pair is always either all registers or all stack (and aligned?)

  // (Case 3 above -- a register [lo] and a stack slot [hi] -- can still occur
  // for dst; the branches below handle such mixed pairs.)
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack: the easiest of the bunch
      // there ought to be a way to do this where, if alignment is OK, we use ldd/std when possible
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
        if (reg2offset(dst.first()) & 0x7) {
          // No: do as pairs
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below.
    // Don't use save_thread because it smashes G2 and we merely want to save
    // a copy.
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
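  //
  // Illustrative usage (a sketch of how this routine is driven below, not
  // new behavior): the same argument description is passed twice around a
  // blocking VM call --
  //   save_or_restore_arguments(masm, ..., map,  in_regs, in_sig_bt); // save
  //   ... blocking call into the runtime ...
  //   save_or_restore_arguments(masm, ..., NULL, in_regs, in_sig_bt); // restore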
  if (map != NULL) {
    // Fill in the map
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        if (in_regs[i].first()->is_stack()) {
          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
        } else if (in_regs[i].first()->is_Register()) {
          map->set_oop(in_regs[i].first());
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  // Save or restore double word values
  int handle_index = 0;
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
      const Register reg = in_regs[i].first()->as_Register();
      if (reg->is_global()) {
        handle_index += 2;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ stx(reg, SP, offset + STACK_BIAS);
        } else {
          __ ldx(SP, offset + STACK_BIAS, reg);
        }
      }
    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
  // Save or restore float values
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
      handle_index++;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }

}


// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
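//
// Rough shape of the code this emits (a sketch, not additional behavior):
//   if (GC_locker::needs_gc()) {
//     <save live argument registers; record oops in an OopMap>
//     SharedRuntime::block_for_jni_critical(thread);
//     <restore argument registers>
//   }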
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  AddressLiteral sync_state(GC_locker::needs_gc_address());
  __ load_bool_contents(sync_state, G3_scratch);
  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
  __ delayed()->nop();

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mov(G2_thread, L7_thread_cache);

  __ set_last_Java_frame(SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);
  oop_maps->add_gc_map( __ offset(), map);

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reset_last_Java_frame();

  // Reload all the register arguments
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        if (reg->is_global()) {
          __ mov(G0, reg);
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
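//
// For example (illustrative only -- the Java declaration is an assumed one):
// a critical native for
//   static native void sum(int[] a);
// receives (jint length, jint* body) in place of the array oop, and
// (0, NULL) when the incoming array reference is NULL.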
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  // Pass the length, ptr pair
  Label is_null, done;
  if (reg.first()->is_stack()) {
    VMRegPair tmp  = reg64_to_VMRegPair(L2);
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ cmp(reg.first()->as_Register(), G0);
  __ brx(Assembler::equal, false, Assembler::pt, is_null);
  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
  __ ba_short(done);
  __ bind(is_null);
  // Pass zeros
  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
  __ bind(done);
}

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = G5_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
          __ ld_ptr(SP, ld_off, temp_reg);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = G5_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, member_reg);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
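    // (On SPARC the compiled calling convention passes the receiver in a
    //  register, so the stack branch below is expected to be unreachable --
    //  see the porting note and fatal() there.)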
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = G3_scratch;  // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments.  The only thing we need an
  // oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing, make an ic check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver             = O0;
    Register result               = O0;
    Register header               = G3_scratch;
    Register hash                 = G3_scratch;  // overwrite header value with hash value
    Register mask                 = G1;          // to get hash field from header

    // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
    // We depend on hash_mask being at most 32 bits and avoid the use of
    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
    // vm: see markOop.hpp.
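    //
    // Roughly equivalent logic (an illustrative C sketch using names from
    // markOop.hpp, not extra generated code):
    //   uintptr_t mark = receiver->mark();
    //   if (!(mark & unlocked_value)) goto slowCase;     // locked (or biased)
    //   intptr_t hash = (mark >> hash_shift) & hash_mask;
    //   if (hash == 0) goto slowCase;                    // not yet computed
    //   return hash;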
    __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
    __ sethi(markOopDesc::hash_mask, mask);
    __ btst(markOopDesc::unlocked_value, header);
    __ br(Assembler::zero, false, Assembler::pn, slowCase);
    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ delayed()->nop();
      __ btst(markOopDesc::biased_lock_bit_in_place, header);
      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
    }
    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);

    // Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
    __ srlx(header, markOopDesc::hash_shift, hash);
#else
    __ srl(header, markOopDesc::hash_shift, hash);
#endif
    __ andcc(hash, mask, hash);
    __ br(Assembler::equal, false, Assembler::pn, slowCase);
    __ delayed()->nop();

    // leaf return.
    __ retl();
    __ delayed()->mov(hash, result);
    __ bind(slowCase);
  }
#endif // COMPILER1


  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  int total_save_slots = 6 * VMRegImpl::slots_per_word;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // These have to be saved and restored across the safepoint
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as an (int, elem*) pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE; break;
            case 'C': in_elem_bt[i] = T_CHAR; break;
            case 'D': in_elem_bt[i] = T_DOUBLE; break;
            case 'F': in_elem_bt[i] = T_FLOAT; break;
            case 'I': in_elem_bt[i] = T_INT; break;
            case 'J': in_elem_bt[i] = T_LONG; break;
            case 'S': in_elem_bt[i] = T_SHORT; break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the 1st six register arguments).  It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number,
  // not the outbound number.)
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
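
  // Illustrative shape of the accounting that follows (a summary, not extra
  // behavior): ABI out-preserve area + outgoing args, then the oop handle
  // area (rounded to an even slot), then an optional klass slot (static) and
  // an optional lock slot (synchronized), then 2 temp slots, all rounded up
  // to a 16-byte frame multiple.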

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = round_to(stack_slots, 2);
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place to save return value or as a temporary for any gpr -> fpr moves
  stack_slots += 2;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      | vararg area         |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute the actual number of stack words we need, rounding to make
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  int frame_complete = ((intptr_t)__ pc()) - start;

  __ verify_thread();

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  // the class mirror instead of a receiver.  This pretty much guarantees that
  // register layout will not match.  We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
  // Because we have a new window and the argument registers are completely
  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  // here.

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  // Record sp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // We move the arguments backward because the floating point destination
  // will always be to a register with a greater or equal register number,
  // or the stack.

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
          c_arg--;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into O1.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);

    // Now handlize the static class mirror in O1.  It's known not-null.
    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ add(SP, klass_offset + STACK_BIAS, O1);
  }


  const Register L6_handle = L6;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    __ mov(O1, L6_handle);
  }

  // We have all of the arguments set up at this point. We MUST NOT touch any
  // Oregs except O6/O7. So if we must call out we must push a new frame; we
  // immediately push one and flush the windows.
#ifdef _LP64
  intptr_t thepc = (intptr_t) __ pc();
  {
    address here = __ pc();
    // Call the next instruction
    __ call(here + 8, relocInfo::none);
    __ delayed()->nop();
  }
#else
  intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */

  // We use the same pc/oopMap repeatedly when we call out
  oop_maps->add_gc_map(thepc - start, map);

  // O7 now has the pc loaded that we will use when we finally call to native.

  // Save thread in L7; it crosses a bunch of VM calls below.
  // Don't use save_thread because it smashes G2 and we merely
  // want to save a copy.
  __ mov(G2_thread, L7_thread_cache);


  // If we create an inner frame, once is plenty;
  // when we create it we must also save G2_thread.
  bool inner_frame_created = false;

  // dtrace method entry support
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // We are in the jni frame unless inner_frame_created is true, in which case
  // we are one frame deeper (the "inner" frame). If we are in the "inner"
  // frame the args are in the Iregs; if in the jni frame, they are in the
  // Oregs.
  // If we ever need to go to the VM (for locking, jvmti) then
  // we will always be in the "inner" frame.

  // Lock a synchronized method
  int lock_offset = -1;         // Set if locked
  if (method->is_synchronized()) {
    Register Roop = O1;
    const Register L3_box = L3;

    create_inner_frame(masm, &inner_frame_created);

    __ ld_ptr(I1, 0, O1);
    Label done;

    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
    __ add(FP, lock_offset+STACK_BIAS, L3_box);
#ifdef ASSERT
    if (UseBiasedLocking) {
      // making the box point to itself will make it clear it went unused
      // but also be obviously invalid
      __ st_ptr(L3_box, L3_box, 0);
    }
#endif // ASSERT
    //
    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
    //
    __ compiler_lock_object(Roop, L1, L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);


    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(Roop, O0);            // Need oop in O0
    __ mov(L3_box, O1);

    // Record last_Java_sp, in case the VM code releases the JVM lock.
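    // (set_last_Java_frame publishes an SP/PC pair in the thread's frame
    //  anchor so the stack remains walkable while we are blocked in the VM;
    //  reset_last_Java_frame below clears it again.)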

    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
    __ br_null_short(O0, Assembler::pt, L);
    __ stop("no pending exception allowed on exit from IR::monitorenter");
    __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flushw();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that on non-v9 (like we care) we don't need these saves
    // either, as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago; now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

#ifdef _LP64
  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
#else
  __ call(native_func, relocInfo::runtime_call_type);
#endif
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:    break;        // Nothing to do!
  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1.
  case T_LONG:
#ifndef _LP64
                  __ mov(O1, I1);
#endif
                  // Fall thru
  case T_OBJECT:                // Really a handle
  case T_ARRAY:
  case T_INT:
                  __ mov(O0, I0);
                  break;
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
  case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
    // Cannot de-handlize until after reclaiming jvm_lock
  default:
    ShouldNotReachHere();
  }

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }

  // thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
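  // (Thread-state sequence on this path, summarizing the code around this
  //  point rather than adding behavior: _thread_in_Java -> _thread_in_native
  //  around the call -> _thread_in_native_trans above -> possible block at a
  //  safepoint -> _thread_in_Java below.)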
2597 __ set(_thread_in_Java, G3_scratch); 2598 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); 2599 __ bind(after_transition); 2600 2601 Label no_reguard; 2602 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch); 2603 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard); 2604 2605 save_native_result(masm, ret_type, stack_slots); 2606 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2607 __ delayed()->nop(); 2608 2609 __ restore_thread(L7_thread_cache); // restore G2_thread 2610 restore_native_result(masm, ret_type, stack_slots); 2611 2612 __ bind(no_reguard); 2613 2614 // Handle possible exception (will unlock if necessary) 2615 2616 // native result if any is live in freg or I0 (and I1 if long and 32bit vm) 2617 2618 // Unlock 2619 if (method->is_synchronized()) { 2620 Label done; 2621 Register I2_ex_oop = I2; 2622 const Register L3_box = L3; 2623 // Get locked oop from the handle we passed to jni 2624 __ ld_ptr(L6_handle, 0, L4); 2625 __ add(SP, lock_offset+STACK_BIAS, L3_box); 2626 // Must save pending exception around the slow-path VM call. Since it's a 2627 // leaf call, the pending exception (if any) can be kept in a register. 2628 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop); 2629 // Now unlock 2630 // (Roop, Rmark, Rbox, Rscratch) 2631 __ compiler_unlock_object(L4, L1, L3_box, L2); 2632 __ br(Assembler::equal, false, Assembler::pt, done); 2633 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box); 2634 2635 // save and restore any potential method result value around the unlocking 2636 // operation. Will save in I0 (or stack for FP returns). 2637 save_native_result(masm, ret_type, stack_slots); 2638 2639 // Must clear pending-exception before re-entering the VM. Since this is 2640 // a leaf call, pending-exception-oop can be safely kept in a register. 2641 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset())); 2642 2643 // slow case of monitor enter. Inline a special case of call_VM that 2644 // disallows any pending_exception. 2645 __ mov(L3_box, O1); 2646 2647 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type); 2648 __ delayed()->mov(L4, O0); // Need oop in O0 2649 2650 __ restore_thread(L7_thread_cache); // restore G2_thread 2651 2652 #ifdef ASSERT 2653 { Label L; 2654 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0); 2655 __ br_null_short(O0, Assembler::pt, L); 2656 __ stop("no pending exception allowed on exit from IR::monitorexit"); 2657 __ bind(L); 2658 } 2659 #endif 2660 restore_native_result(masm, ret_type, stack_slots); 2661 // check_forward_pending_exception jump to forward_exception if any pending 2662 // exception is set. The forward_exception routine expects to see the 2663 // exception in pending_exception and not in a register. Kind of clumsy, 2664 // since all folks who branch to forward_exception must have tested 2665 // pending_exception first and hence have it in a register already. 
2666 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset())); 2667 __ bind(done); 2668 } 2669 2670 // Tell dtrace about this method exit 2671 { 2672 SkipIfEqual skip_if( 2673 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2674 save_native_result(masm, ret_type, stack_slots); 2675 __ set_metadata_constant(method(), O1); 2676 __ call_VM_leaf(L7_thread_cache, 2677 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2678 G2_thread, O1); 2679 restore_native_result(masm, ret_type, stack_slots); 2680 } 2681 2682 // Clear "last Java frame" SP and PC. 2683 __ verify_thread(); // G2_thread must be correct 2684 __ reset_last_Java_frame(); 2685 2686 // Unpack oop result 2687 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2688 Label L; 2689 __ addcc(G0, I0, G0); 2690 __ brx(Assembler::notZero, true, Assembler::pt, L); 2691 __ delayed()->ld_ptr(I0, 0, I0); 2692 __ mov(G0, I0); 2693 __ bind(L); 2694 __ verify_oop(I0); 2695 } 2696 2697 if (!is_critical_native) { 2698 // reset handle block 2699 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5); 2700 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes()); 2701 2702 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch); 2703 check_forward_pending_exception(masm, G3_scratch); 2704 } 2705 2706 2707 // Return 2708 2709 #ifndef _LP64 2710 if (ret_type == T_LONG) { 2711 2712 // Must leave proper result in O0,O1 and G1 (c2/tiered only) 2713 __ sllx(I0, 32, G1); // Shift bits into high G1 2714 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 2715 __ or3 (I1, G1, G1); // OR 64 bits into G1 2716 } 2717 #endif 2718 2719 __ ret(); 2720 __ delayed()->restore(); 2721 2722 __ flush(); 2723 2724 nmethod *nm = nmethod::new_native_nmethod(method, 2725 compile_id, 2726 masm->code(), 2727 vep_offset, 2728 frame_complete, 2729 stack_slots / VMRegImpl::slots_per_word, 2730 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2731 in_ByteSize(lock_offset), 2732 oop_maps); 2733 2734 if (is_critical_native) { 2735 nm->set_lazy_critical_native(true); 2736 } 2737 return nm; 2738 2739 } 2740 2741 #ifdef HAVE_DTRACE_H 2742 // --------------------------------------------------------------------------- 2743 // Generate a dtrace nmethod for a given signature. The method takes arguments 2744 // in the Java compiled code convention, marshals them to the native 2745 // abi and then leaves nops at the position you would expect to call a native 2746 // function. When the probe is enabled the nops are replaced with a trap 2747 // instruction that dtrace inserts and the trace will cause a notification 2748 // to dtrace. 2749 // 2750 // The probes are only able to take primitive types and java/lang/String as 2751 // arguments. No other java types are allowed. Strings are converted to utf8 2752 // strings so that from dtrace point of view java strings are converted to C 2753 // strings. There is an arbitrary fixed limit on the total space that a method 2754 // can use for converting the strings. (256 chars per string in the signature). 2755 // So any java string larger then this is truncated. 2756 2757 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 }; 2758 static bool offsets_initialized = false; 2759 2760 nmethod *SharedRuntime::generate_dtrace_nmethod( 2761 MacroAssembler *masm, methodHandle method) { 2762 2763 2764 // generate_dtrace_nmethod is guarded by a mutex so we are sure to 2765 // be single threaded in this method. 
2766 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); 2767 2768 // Fill in the signature array, for the calling-convention call. 2769 int total_args_passed = method->size_of_parameters(); 2770 2771 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); 2772 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); 2773 2774 // The signature we are going to use for the trap that dtrace will see 2775 // java/lang/String is converted. We drop "this" and any other object 2776 // is converted to NULL. (A one-slot java/lang/Long object reference 2777 // is converted to a two-slot long, which is why we double the allocation). 2778 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); 2779 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); 2780 2781 int i=0; 2782 int total_strings = 0; 2783 int first_arg_to_pass = 0; 2784 int total_c_args = 0; 2785 2786 // Skip the receiver as dtrace doesn't want to see it 2787 if( !method->is_static() ) { 2788 in_sig_bt[i++] = T_OBJECT; 2789 first_arg_to_pass = 1; 2790 } 2791 2792 SignatureStream ss(method->signature()); 2793 for ( ; !ss.at_return_type(); ss.next()) { 2794 BasicType bt = ss.type(); 2795 in_sig_bt[i++] = bt; // Collect remaining bits of signature 2796 out_sig_bt[total_c_args++] = bt; 2797 if( bt == T_OBJECT) { 2798 Symbol* s = ss.as_symbol_or_null(); 2799 if (s == vmSymbols::java_lang_String()) { 2800 total_strings++; 2801 out_sig_bt[total_c_args-1] = T_ADDRESS; 2802 } else if (s == vmSymbols::java_lang_Boolean() || 2803 s == vmSymbols::java_lang_Byte()) { 2804 out_sig_bt[total_c_args-1] = T_BYTE; 2805 } else if (s == vmSymbols::java_lang_Character() || 2806 s == vmSymbols::java_lang_Short()) { 2807 out_sig_bt[total_c_args-1] = T_SHORT; 2808 } else if (s == vmSymbols::java_lang_Integer() || 2809 s == vmSymbols::java_lang_Float()) { 2810 out_sig_bt[total_c_args-1] = T_INT; 2811 } else if (s == vmSymbols::java_lang_Long() || 2812 s == vmSymbols::java_lang_Double()) { 2813 out_sig_bt[total_c_args-1] = T_LONG; 2814 out_sig_bt[total_c_args++] = T_VOID; 2815 } 2816 } else if ( bt == T_LONG || bt == T_DOUBLE ) { 2817 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2818 // We convert double to long 2819 out_sig_bt[total_c_args-1] = T_LONG; 2820 out_sig_bt[total_c_args++] = T_VOID; 2821 } else if ( bt == T_FLOAT) { 2822 // We convert float to int 2823 out_sig_bt[total_c_args-1] = T_INT; 2824 } 2825 } 2826 2827 assert(i==total_args_passed, "validly parsed signature"); 2828 2829 // Now get the compiled-Java layout as input arguments 2830 int comp_args_on_stack; 2831 comp_args_on_stack = SharedRuntime::java_calling_convention( 2832 in_sig_bt, in_regs, total_args_passed, false); 2833 2834 // We have received a description of where all the java arg are located 2835 // on entry to the wrapper. We need to convert these args to where 2836 // the a native (non-jni) function would expect them. To figure out 2837 // where they go we convert the java signature to a C signature and remove 2838 // T_VOID for any long/double we might have received. 2839 2840 2841 // Now figure out where the args must be stored and how much stack space 2842 // they require (neglecting out_preserve_stack_slots but space for storing 2843 // the 1st six register arguments). It's weird see int_stk_helper. 2844 // 2845 int out_arg_slots; 2846 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); 2847 2848 // Calculate the total number of stack slots we will need. 
2849 2850 // First count the abi requirement plus all of the outgoing args 2851 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 2852 2853 // Plus a temp for possible converion of float/double/long register args 2854 2855 int conversion_temp = stack_slots; 2856 stack_slots += 2; 2857 2858 2859 // Now space for the string(s) we must convert 2860 2861 int string_locs = stack_slots; 2862 stack_slots += total_strings * 2863 (max_dtrace_string_size / VMRegImpl::stack_slot_size); 2864 2865 // Ok The space we have allocated will look like: 2866 // 2867 // 2868 // FP-> | | 2869 // |---------------------| 2870 // | string[n] | 2871 // |---------------------| <- string_locs[n] 2872 // | string[n-1] | 2873 // |---------------------| <- string_locs[n-1] 2874 // | ... | 2875 // | ... | 2876 // |---------------------| <- string_locs[1] 2877 // | string[0] | 2878 // |---------------------| <- string_locs[0] 2879 // | temp | 2880 // |---------------------| <- conversion_temp 2881 // | outbound memory | 2882 // | based arguments | 2883 // | | 2884 // |---------------------| 2885 // | | 2886 // SP-> | out_preserved_slots | 2887 // 2888 // 2889 2890 // Now compute actual number of stack words we need rounding to make 2891 // stack properly aligned. 2892 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word); 2893 2894 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 2895 2896 intptr_t start = (intptr_t)__ pc(); 2897 2898 // First thing make an ic check to see if we should even be here 2899 2900 { 2901 Label L; 2902 const Register temp_reg = G3_scratch; 2903 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); 2904 __ verify_oop(O0); 2905 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg); 2906 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L); 2907 2908 __ jump_to(ic_miss, temp_reg); 2909 __ delayed()->nop(); 2910 __ align(CodeEntryAlignment); 2911 __ bind(L); 2912 } 2913 2914 int vep_offset = ((intptr_t)__ pc()) - start; 2915 2916 2917 // The instruction at the verified entry point must be 5 bytes or longer 2918 // because it can be patched on the fly by make_non_entrant. The stack bang 2919 // instruction fits that requirement. 2920 2921 // Generate stack overflow check before creating frame 2922 __ generate_stack_overflow_check(stack_size); 2923 2924 assert(((intptr_t)__ pc() - start - vep_offset) >= 5, 2925 "valid size for make_non_entrant"); 2926 2927 // Generate a new frame for the wrapper. 2928 __ save(SP, -stack_size, SP); 2929 2930 // Frame is now completed as far a size and linkage. 2931 2932 int frame_complete = ((intptr_t)__ pc()) - start; 2933 2934 #ifdef ASSERT 2935 bool reg_destroyed[RegisterImpl::number_of_registers]; 2936 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2937 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2938 reg_destroyed[r] = false; 2939 } 2940 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2941 freg_destroyed[f] = false; 2942 } 2943 2944 #endif /* ASSERT */ 2945 2946 VMRegPair zero; 2947 const Register g0 = G0; // without this we get a compiler warning (why??) 
  zero.set2(g0->as_VMReg());

  int c_arg, j_arg;

  Register conversion_off = noreg;

  for (j_arg = first_arg_to_pass, c_arg = 0 ;
       j_arg < total_args_passed ; j_arg++, c_arg++ ) {

    VMRegPair src = in_regs[j_arg];
    VMRegPair dst = out_regs[c_arg];

#ifdef ASSERT
    if (src.first()->is_Register()) {
      assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
    } else if (src.first()->is_FloatRegister()) {
      assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
                                               FloatRegisterImpl::S)], "ack!");
    }
    if (dst.first()->is_Register()) {
      reg_destroyed[dst.first()->as_Register()->encoding()] = true;
    } else if (dst.first()->is_FloatRegister()) {
      freg_destroyed[dst.first()->as_FloatRegister()->encoding(
                                                 FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[j_arg]) {
      case T_ARRAY:
      case T_OBJECT:
        {
          if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
              out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
            // need to unbox a one-slot value
            Register in_reg = L0;
            Register tmp = L2;
            if ( src.first()->is_reg() ) {
              in_reg = src.first()->as_Register();
            } else {
              assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
                     "must be");
              __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
            }
            // If the final destination is an acceptable register
            if ( dst.first()->is_reg() ) {
              if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
                tmp = dst.first()->as_Register();
              }
            }

            Label skipUnbox;
            if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
              __ mov(G0, tmp->successor());
            }
            __ br_null(in_reg, true, Assembler::pn, skipUnbox);
            __ delayed()->mov(G0, tmp);

            BasicType bt = out_sig_bt[c_arg];
            int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
            switch (bt) {
              case T_BYTE:
                __ ldub(in_reg, box_offset, tmp); break;
              case T_SHORT:
                __ lduh(in_reg, box_offset, tmp); break;
              case T_INT:
                __ ld(in_reg, box_offset, tmp); break;
              case T_LONG:
                __ ld_long(in_reg, box_offset, tmp); break;
              default: ShouldNotReachHere();
            }

            __ bind(skipUnbox);
            // If tmp wasn't the final destination, copy to the final destination
            if (tmp == L2) {
              VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
              if (out_sig_bt[c_arg] == T_LONG) {
                long_move(masm, tmp_as_VM, dst);
              } else {
                move32_64(masm, tmp_as_VM, out_regs[c_arg]);
              }
            }
            if (out_sig_bt[c_arg] == T_LONG) {
              assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
              ++c_arg; // move over the T_VOID to keep the loop indices in sync
            }
          } else if (out_sig_bt[c_arg] == T_ADDRESS) {
            Register s =
                src.first()->is_reg() ? src.first()->as_Register() : L2;
            Register d =
                dst.first()->is_reg() ? dst.first()->as_Register() : L2;

            // We store the oop now so that the conversion pass can reach it
            // while in the inner frame. This will be the only store if
            // the oop is NULL.
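            // Four cases follow: reg->reg is a plain mov; reg->stack
            // stores into the out-arg slot; stack->reg loads straight
            // into d; and stack->stack loads into L2 and then stores
            // into the out-arg slot.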
            if (s != L2) {
              // src is register
              if (d != L2) {
                // dst is register
                __ mov(s, d);
              } else {
                assert(Assembler::is_simm13(reg2offset(dst.first()) +
                       STACK_BIAS), "must be");
                __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
              }
            } else {
              // src not a register
              assert(Assembler::is_simm13(reg2offset(src.first()) +
                     STACK_BIAS), "must be");
              __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
              if (d == L2) {
                assert(Assembler::is_simm13(reg2offset(dst.first()) +
                       STACK_BIAS), "must be");
                __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
              }
            }
          } else if (out_sig_bt[c_arg] != T_VOID) {
            // Convert the arg to NULL
            if (dst.first()->is_reg()) {
              __ mov(G0, dst.first()->as_Register());
            } else {
              assert(Assembler::is_simm13(reg2offset(dst.first()) +
                     STACK_BIAS), "must be");
              __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
            }
          }
        }
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          move32_64(masm, src, dst);
        } else {
          if (dst.first()->is_reg()) {
            // freg -> reg
            int off =
              STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
            Register d = dst.first()->as_Register();
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
              __ ld(SP, off, d);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
              __ ld(SP, conversion_off, d);
            }
          } else {
            // freg -> mem
            int off = STACK_BIAS + reg2offset(dst.first());
            if (Assembler::is_simm13(off)) {
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, off);
            } else {
              if (conversion_off == noreg) {
                __ set(off, L6);
                conversion_off = L6;
              }
              __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
                     SP, conversion_off);
            }
          }
        }
        break;

      case T_DOUBLE:
        assert( j_arg + 1 < total_args_passed &&
                in_sig_bt[j_arg + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        if (src.first()->is_stack()) {
          // Stack to stack/reg is simple
          long_move(masm, src, dst);
        } else {
          Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;

          // Destination could be an odd reg on 32-bit in which case
          // we can't load directly to the destination.
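          // (A 64-bit ld_long into an integer pair must target an
          // even-numbered register on 32-bit SPARC, hence the detour
          // through L2 below for odd-numbered destinations.)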

          if (!d->is_even() && wordSize == 4) {
            d = L2;
          }
          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, off);
            __ ld_long(SP, off, d);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
                   SP, conversion_off);
            __ ld_long(SP, conversion_off, d);
          }
          if (d == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        break;

      case T_LONG:
        // 32-bit can't do a split move of something like g1 -> O0, O1,
        // so use a memory temp
        if (src.is_single_phys_reg() && wordSize == 4) {
          Register tmp = L2;
          if (dst.first()->is_reg() &&
              (wordSize == 8 || dst.first()->as_Register()->is_even())) {
            tmp = dst.first()->as_Register();
          }

          int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
          if (Assembler::is_simm13(off)) {
            __ stx(src.first()->as_Register(), SP, off);
            __ ld_long(SP, off, tmp);
          } else {
            if (conversion_off == noreg) {
              __ set(off, L6);
              conversion_off = L6;
            }
            __ stx(src.first()->as_Register(), SP, conversion_off);
            __ ld_long(SP, conversion_off, tmp);
          }

          if (tmp == L2) {
            long_move(masm, reg64_to_VMRegPair(L2), dst);
          }
        } else {
          long_move(masm, src, dst);
        }
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, src, dst);
    }
  }

  // If we have any strings we must store any register-based arg to the
  // stack. This includes any still-live float registers too.

  if (total_strings > 0) {

    // protect all the arg registers
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    const Register L2_string_off = L2;

    // Get first string offset
    __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);

    for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
      if (out_sig_bt[c_arg] == T_ADDRESS) {

        VMRegPair dst = out_regs[c_arg];
        const Register d = dst.first()->is_reg() ?
            dst.first()->as_Register()->after_save() : noreg;

        // It's a string; the oop was already copied to the out-arg
        // position
        if (d != noreg) {
          __ mov(d, O0);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
        }
        Label skip;

        __ br_null(O0, false, Assembler::pn, skip);
        __ delayed()->add(FP, L2_string_off, O1);

        if (d != noreg) {
          __ mov(O1, d);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
        }

        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
                relocInfo::runtime_call_type);
        __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);

        __ bind(skip);

      }

    }
    __ mov(L7_thread_cache, G2_thread);
    __ restore();

  }

  // OK, now we are done. Need to place the nop that dtrace wants in order
  // to patch in the trap.

  int patch_offset = ((intptr_t)__ pc()) - start;

  __ nop();

  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_dtrace_nmethod(
      method, masm->code(), vep_offset, patch_offset, frame_complete,
      stack_slots / VMRegImpl::slots_per_word);
  return nm;

}

#endif // HAVE_DTRACE_H

// This function returns the adjustment (in number of words) to a c2i adapter
// activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return round_to(diff, WordsPerLong);
}

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register        G3pcs              = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;         // Array of frame sizes (input)
  Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // size of the current frame (temp)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);               // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5; it could hold the valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
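  // (Orientation: each gen_new_frame call below consumes one entry of the
  // UnrollBlock arrays; O4array_size is decremented and G3pcs advanced
  // inside gen_new_frame, while O3array is advanced in the loop's delay
  // slot, until O4array_size reaches zero.)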
  Register        G3pcs              = G3_scratch;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;
  Register        O4array_size       = O4;
  Label loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of the stack.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra Java locals
  //
  // We capture the original sp for the transition frame only because it is
  // needed in order to properly calculate interpreter_sp_adjustment. Even
  // though in real life every interpreter frame captures a savedSP, it is
  // only needed at the transition (fortunately). If we had to have it
  // correct everywhere then we would need to be told the sp_adjustment for
  // each frame we create. If the frame size array were to have twice the
  // frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);        // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);           // load final frame's new pc

}

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  FloatRegister   Freturn0           = F0;
  Register        Greturn1           = G1;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        L0deopt_mode       = L0;
  Register        G4deopt_mode       = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, -sizeof(double) - sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the
  // deoptimizing pc is now in O7. Return values are still in the expected
  // places.

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server). O0 contains the exception oop and O7 contains the original
  // exception pc. So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.
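  // (Stashing the oop in the thread rather than keeping it in a register
  // keeps it visible to GC across the register save and the runtime call
  // below.)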

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
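  // (exception_pc is cleared as well; the interpreter frames about to be
  // built get their pcs from the UnrollBlock's pc array instead.)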
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  // 32-bit 1-register longs return longs in G1
  __ stx(Greturn1, saved_Greturn1_addr);
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
  Label not_long;
  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  __ ldd(saved_Greturn1_addr, I0);
  __ bind(not_long);
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
  if (UseStackBanging) {
    pad += StackShadowPages*16 + 32;
  }
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.
// A simple 'save' turns the %o's into %i's and an interrupt will chop off
// their heads. Making space in the caller's frame first will let us save
// the 64-bit %o's before save'ing, but we cannot hand the adjusted FP off
// to the GC stack-crawler: this will modify the caller's SP and mess up
// HIS OopMaps. So we first adjust the caller's SP, then save the 64-bit
// %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that the frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the illegal instruction exception
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
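  // (forward_exception_entry loads the pending exception from the thread
  // and dispatches to the exception handler that matches the PC in O7.)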
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any GC of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to find the proper destination of the call.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words is in words (save_live_registers reports
  // total_frame_words as a word count)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
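
// (Usage note: the shared runtime builds its resolve stubs with this
// generator, along the lines of
//     generate_resolve_blob(
//         CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
//         "resolve_static_call");
// and likewise for the virtual and opt-virtual variants.)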