/*
 * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers.  This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic.  In the 32bit build the compiler can
  // have O registers live with 64 bit quantities.  A window save will
  // cut the heads off of the registers.  We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception, a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs.
  // Although they are now Iregs, we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions.  Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities.  Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots.  All stack slots are
// based off of the window top.  VMRegImpl::stack0 refers to the first slot past
// the 16-word window, and VMRegImpl::stack0+1 refers to the memory word 4 bytes
// higher.  Register values 0-63 (up to RegisterImpl::number_of_registers) are
// the 64-bit integer registers.  Values 64-95 are the (32-bit only) float
// registers.  Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers.  For example, there
// is an O0-low and an O0-high.  Essentially, all int register numbers are
// doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
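// For example (illustrative): on a 64-bit build an incoming signature
// (int, Object, long, double) lays out as
//   int    -> I0 (one 32-bit slot)
//   Object -> I1 (both halves of the 64-bit register)
//   long   -> I2 (the trailing T_VOID half gets VMRegImpl::Bad())
//   double -> F0:F1 (aligned float-register pair)
// with int/oop args beyond the first 6 and float args beyond the first 8
// spilling to stack slots above the register window.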
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt = 0;
  int flt_reg_cnt = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                           stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
#ifdef COMPILER2
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
      // spare and available.  This convention isn't used by the Sparc ABI or
      // anywhere else.  If we're tiered then we don't use G-regs because c1
      // can't deal with them as a "pair".  (Tiered makes this code think g's are filled)
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
        } else {
          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
        }
        int_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
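// SPARC load/store displacements are 13-bit signed immediates (simm13,
// -4096..4095); offsets outside that range must first be materialized into
// a scratch register (Rdisp below) via ensure_simm13_or_reg().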
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();
  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  }

  int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
  int next_tag_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  RegisterOrConstant tag_slot(const int st_off);
  RegisterOrConstant next_tag_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  //   G1: 1st Long arg (32bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32bit build);
  //   G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                                   Register scratch) {
  if (TaggedStackInterpreter) {
    RegisterOrConstant slot = tag_slot(st_off);
    // have to store zero because local slots can be reused (rats!)
    if (t == frame::TagValue) {
      __ st_ptr(G0, base, slot);
    } else if (t == frame::TagCategory2) {
      __ st_ptr(G0, base, slot);
      __ st_ptr(G0, base, next_tag_slot(st_off));
    } else {
      __ mov(t, scratch);
      __ st_ptr(scratch, base, slot);
    }
  }
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
  RegisterOrConstant roc(tag_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
  RegisterOrConstant roc(next_tag_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagReference, base, st_off, r);
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, r);
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here.  The next pass
  // thru the call site will run compiled.  If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles.  If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area.  We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter.  Round up
  // to stack alignment.
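  // For example (illustrative, assuming an 8-byte stack element): with 3
  // args, arg_size below is 24; adding the varargs area and rounding to
  // 2*wordSize keeps SP at the alignment the interpreter expects.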
  const int arg_size = total_args_passed * Interpreter::stackElementSize();
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize();

  Register base = SP;

#ifdef _LP64
  // In the 64bit build because of wider slots and STACKBIAS we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as
  // temporary displacement.
  if (!__ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
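  // (G3_scratch doubles as Rdisp above and may also have been used to
  // materialize an extraspace value too big for simm13.)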
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i.  Azul allowed this but we do not.  If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | pad, align   |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
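    // Argument 0 sits at the highest offset from Gargs (the interpreter
    // pushed the args downward), so (total_args_passed - i) walks from the
    // first argument toward the stack top.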
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {     // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
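      // st_off can exceed the simm13 displacement range for large frames,
      // hence the ensure_simm13_or_reg() below.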
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here.  If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code.  Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible.  So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null(L0, false, Assembler::pt, loop);
    __ delayed()->nop();

    __ restore();
  }


  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.
  // On entry we know G5 holds the methodOop.  The args start out packed in
  // the compiled layout.  They need to be unpacked into the interpreter
  // layout.  This will almost always require some stack space.  We grow the
  // current (compiled) stack, then repack the args.  We finally end in a
  // jump to the generic interpreter entry point.  On exit from the
  // interpreter, the interpreter will restore our SP (lest the compiled
  // code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp = L0;   // another scratch register
#else
    Register R_temp = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons.  We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area.  C2 thinks of that
  // abi area as only out_preserve_stack_slots.  This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it.  So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that the out_preserve_stack_slots is
  // insufficient for C calls.  What a mess.  I sure hope those 6
  // stack words were worth it on every java call!
  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64
  // The LP64 convention is the V9 convention which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.  Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                    // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slots
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
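  // For example (illustrative): a native signature (jint, jlong, jint) maps
  // below to O0 for the first int, the O1/O2 pair for the long (first() is
  // O1, second() is O2, from int_stk_helper(i)/int_stk_helper(i+1)), and O3
  // for the last int.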
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception.  Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
// is no exception handler.  We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (sparc abi) even though java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits. So this routine does 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means hi address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address not msb for stack slots
      // and lo is the "real" register for registers
      // src is a pair: each half is either a register or a stack slot

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // dst.lo now holds the LSW in its low word; shift the MSW half into
      // the upper 32 bits and OR it in.
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  // 1: a single physical register
  // 2: two physical registers (v8)
  // 3: a physical reg [lo] and a stack slot [hi] (v8)
  // 4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?)
1674 1675 // in a register [lo] and a stack slot [hi] 1676 if (src.first()->is_stack()) { 1677 if (dst.first()->is_stack()) { 1678 // stack to stack the easiest of the bunch 1679 // ought to be a way to do this where if alignment is ok we use ldd/std when possible 1680 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5); 1681 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1682 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS); 1683 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1684 } else { 1685 // stack to reg 1686 if (dst.second()->is_stack()) { 1687 // stack -> reg, stack -> stack 1688 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1689 if (dst.first()->is_Register()) { 1690 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1691 } else { 1692 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1693 } 1694 // This was missing. (very rare case) 1695 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1696 } else { 1697 // stack -> reg 1698 // Eventually optimize for alignment QQQ 1699 if (dst.first()->is_Register()) { 1700 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1701 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register()); 1702 } else { 1703 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1704 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister()); 1705 } 1706 } 1707 } 1708 } else if (dst.first()->is_stack()) { 1709 // reg to stack 1710 if (src.first()->is_Register()) { 1711 // Eventually optimize for alignment QQQ 1712 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS); 1713 if (src.second()->is_stack()) { 1714 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1715 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1716 } else { 1717 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS); 1718 } 1719 } else { 1720 // fpr to stack 1721 if (src.second()->is_stack()) { 1722 ShouldNotReachHere(); 1723 } else { 1724 // Is the stack aligned? 
      if (reg2offset(dst.first()) & 0x7) {
        // Not 8-byte aligned, so store as a pair of singles
        __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int total_in_args,
                                                int comp_args_on_stack, // in VMRegStackSlots
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments.
The only thing we need an 1802 // oopMap for is if the call is static 1803 // 1804 // An OopMap for lock (and class if static), and one for the VM call itself 1805 OopMapSet *oop_maps = new OopMapSet(); 1806 intptr_t start = (intptr_t)__ pc(); 1807 1808 // First thing make an ic check to see if we should even be here 1809 { 1810 Label L; 1811 const Register temp_reg = G3_scratch; 1812 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); 1813 __ verify_oop(O0); 1814 __ load_klass(O0, temp_reg); 1815 __ cmp(temp_reg, G5_inline_cache_reg); 1816 __ brx(Assembler::equal, true, Assembler::pt, L); 1817 __ delayed()->nop(); 1818 1819 __ jump_to(ic_miss, temp_reg); 1820 __ delayed()->nop(); 1821 __ align(CodeEntryAlignment); 1822 __ bind(L); 1823 } 1824 1825 int vep_offset = ((intptr_t)__ pc()) - start; 1826 1827 #ifdef COMPILER1 1828 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) { 1829 // Object.hashCode can pull the hashCode from the header word 1830 // instead of doing a full VM transition once it's been computed. 1831 // Since hashCode is usually polymorphic at call sites we can't do 1832 // this optimization at the call site without a lot of work. 1833 Label slowCase; 1834 Register receiver = O0; 1835 Register result = O0; 1836 Register header = G3_scratch; 1837 Register hash = G3_scratch; // overwrite header value with hash value 1838 Register mask = G1; // to get hash field from header 1839 1840 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked. 1841 // We depend on hash_mask being at most 32 bits and avoid the use of 1842 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit 1843 // vm: see markOop.hpp. 1844 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header); 1845 __ sethi(markOopDesc::hash_mask, mask); 1846 __ btst(markOopDesc::unlocked_value, header); 1847 __ br(Assembler::zero, false, Assembler::pn, slowCase); 1848 if (UseBiasedLocking) { 1849 // Check if biased and fall through to runtime if so 1850 __ delayed()->nop(); 1851 __ btst(markOopDesc::biased_lock_bit_in_place, header); 1852 __ br(Assembler::notZero, false, Assembler::pn, slowCase); 1853 } 1854 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask); 1855 1856 // Check for a valid (non-zero) hash code and get its value. 1857 #ifdef _LP64 1858 __ srlx(header, markOopDesc::hash_shift, hash); 1859 #else 1860 __ srl(header, markOopDesc::hash_shift, hash); 1861 #endif 1862 __ andcc(hash, mask, hash); 1863 __ br(Assembler::equal, false, Assembler::pn, slowCase); 1864 __ delayed()->nop(); 1865 1866 // leaf return. 1867 __ retl(); 1868 __ delayed()->mov(hash, result); 1869 __ bind(slowCase); 1870 } 1871 #endif // COMPILER1 1872 1873 1874 // We have received a description of where all the java arg are located 1875 // on entry to the wrapper. We need to convert these args to where 1876 // the jni function will expect them. 
  // To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  int total_c_args = total_in_args + 1;
  if (method->is_static()) {
    total_c_args++;
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  for (int i = 0; i < total_in_args ; i++ ) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the 1st six register arguments). It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper. We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number not
  // the outbound number).
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
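  // As a worked example of the accounting that follows (illustrative only;
  // the exact numbers depend on the method): for a static synchronized
  // method the running total below becomes
  //
  //   stack_slots = out_preserve_stack_slots()   // register window words
  //               + out_arg_slots                // outgoing C args
  //               + 6*slots_per_word             // oop handle area
  //               + slots_per_word               // handlized klass
  //               + slots_per_word               // lock box
  //               + 2                            // result/fpr-move temp
  //
  // rounded up to a 2*slots_per_word multiple so the frame stays properly
  // aligned.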
1918 1919 // First count the abi requirement plus all of the outgoing args 1920 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1921 1922 // Now the space for the inbound oop handle area 1923 1924 int oop_handle_offset = stack_slots; 1925 stack_slots += 6*VMRegImpl::slots_per_word; 1926 1927 // Now any space we need for handlizing a klass if static method 1928 1929 int oop_temp_slot_offset = 0; 1930 int klass_slot_offset = 0; 1931 int klass_offset = -1; 1932 int lock_slot_offset = 0; 1933 bool is_static = false; 1934 1935 if (method->is_static()) { 1936 klass_slot_offset = stack_slots; 1937 stack_slots += VMRegImpl::slots_per_word; 1938 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1939 is_static = true; 1940 } 1941 1942 // Plus a lock if needed 1943 1944 if (method->is_synchronized()) { 1945 lock_slot_offset = stack_slots; 1946 stack_slots += VMRegImpl::slots_per_word; 1947 } 1948 1949 // Now a place to save return value or as a temporary for any gpr -> fpr moves 1950 stack_slots += 2; 1951 1952 // Ok The space we have allocated will look like: 1953 // 1954 // 1955 // FP-> | | 1956 // |---------------------| 1957 // | 2 slots for moves | 1958 // |---------------------| 1959 // | lock box (if sync) | 1960 // |---------------------| <- lock_slot_offset 1961 // | klass (if static) | 1962 // |---------------------| <- klass_slot_offset 1963 // | oopHandle area | 1964 // |---------------------| <- oop_handle_offset 1965 // | outbound memory | 1966 // | based arguments | 1967 // | | 1968 // |---------------------| 1969 // | vararg area | 1970 // |---------------------| 1971 // | | 1972 // SP-> | out_preserved_slots | 1973 // 1974 // 1975 1976 1977 // Now compute actual number of stack words we need rounding to make 1978 // stack properly aligned. 1979 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); 1980 1981 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1982 1983 // Generate stack overflow check before creating frame 1984 __ generate_stack_overflow_check(stack_size); 1985 1986 // Generate a new frame for the wrapper. 1987 __ save(SP, -stack_size, SP); 1988 1989 int frame_complete = ((intptr_t)__ pc()) - start; 1990 1991 __ verify_thread(); 1992 1993 1994 // 1995 // We immediately shuffle the arguments so that any vm call we have to 1996 // make from here on out (sync slow path, jvmti, etc.) we will have 1997 // captured the oops from our caller and have a valid oopMap for 1998 // them. 1999 2000 // ----------------- 2001 // The Grand Shuffle 2002 // 2003 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2004 // (derived from JavaThread* which is in L7_thread_cache) and, if static, 2005 // the class mirror instead of a receiver. This pretty much guarantees that 2006 // register layout will not match. We ignore these extra arguments during 2007 // the shuffle. The shuffle is described by the two calling convention 2008 // vectors we have in our possession. We simply walk the java vector to 2009 // get the source locations and the c vector to get the destinations. 2010 // Because we have a new window and the argument registers are completely 2011 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about 2012 // here. 2013 2014 // This is a trick. We double the stack slots so we can claim 2015 // the oops in the caller's frame. 
Since we are sure to have 2016 // more args than the caller doubling is enough to make 2017 // sure we can capture all the incoming oop args from the 2018 // caller. 2019 // 2020 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2021 int c_arg = total_c_args - 1; 2022 // Record sp-based slot for receiver on stack for non-static methods 2023 int receiver_offset = -1; 2024 2025 // We move the arguments backward because the floating point registers 2026 // destination will always be to a register with a greater or equal register 2027 // number or the stack. 2028 2029 #ifdef ASSERT 2030 bool reg_destroyed[RegisterImpl::number_of_registers]; 2031 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2032 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2033 reg_destroyed[r] = false; 2034 } 2035 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2036 freg_destroyed[f] = false; 2037 } 2038 2039 #endif /* ASSERT */ 2040 2041 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) { 2042 2043 #ifdef ASSERT 2044 if (in_regs[i].first()->is_Register()) { 2045 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!"); 2046 } else if (in_regs[i].first()->is_FloatRegister()) { 2047 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!"); 2048 } 2049 if (out_regs[c_arg].first()->is_Register()) { 2050 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2051 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 2052 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true; 2053 } 2054 #endif /* ASSERT */ 2055 2056 switch (in_sig_bt[i]) { 2057 case T_ARRAY: 2058 case T_OBJECT: 2059 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2060 ((i == 0) && (!is_static)), 2061 &receiver_offset); 2062 break; 2063 case T_VOID: 2064 break; 2065 2066 case T_FLOAT: 2067 float_move(masm, in_regs[i], out_regs[c_arg]); 2068 break; 2069 2070 case T_DOUBLE: 2071 assert( i + 1 < total_in_args && 2072 in_sig_bt[i + 1] == T_VOID && 2073 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2074 double_move(masm, in_regs[i], out_regs[c_arg]); 2075 break; 2076 2077 case T_LONG : 2078 long_move(masm, in_regs[i], out_regs[c_arg]); 2079 break; 2080 2081 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2082 2083 default: 2084 move32_64(masm, in_regs[i], out_regs[c_arg]); 2085 } 2086 } 2087 2088 // Pre-load a static method's oop into O1. Used both by locking code and 2089 // the normal JNI call code. 2090 if (method->is_static()) { 2091 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1); 2092 2093 // Now handlize the static class mirror in O1. It's known not-null. 2094 __ st_ptr(O1, SP, klass_offset + STACK_BIAS); 2095 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2096 __ add(SP, klass_offset + STACK_BIAS, O1); 2097 } 2098 2099 2100 const Register L6_handle = L6; 2101 2102 if (method->is_synchronized()) { 2103 __ mov(O1, L6_handle); 2104 } 2105 2106 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs 2107 // except O6/O7. So if we must call out we must push a new frame. We immediately 2108 // push a new frame and flush the windows. 
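  // The 64-bit code below captures the current pc into O7 so that one
  // pc/oopMap pair can stand in for every runtime call made from this
  // wrapper (the 32-bit path reads the pc directly via load_pc_address).
  // The trick, sketched here for illustration, relies on the SPARC call
  // instruction depositing its own address in %o7:
  //
  //   call  .+8        ! %o7 <- address of the call itself
  //   nop              ! delay slot
  //
  // Control simply falls through to the next instruction, but %o7 now
  // holds a usable pc.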
2109 2110 #ifdef _LP64 2111 intptr_t thepc = (intptr_t) __ pc(); 2112 { 2113 address here = __ pc(); 2114 // Call the next instruction 2115 __ call(here + 8, relocInfo::none); 2116 __ delayed()->nop(); 2117 } 2118 #else 2119 intptr_t thepc = __ load_pc_address(O7, 0); 2120 #endif /* _LP64 */ 2121 2122 // We use the same pc/oopMap repeatedly when we call out 2123 oop_maps->add_gc_map(thepc - start, map); 2124 2125 // O7 now has the pc loaded that we will use when we finally call to native. 2126 2127 // Save thread in L7; it crosses a bunch of VM calls below 2128 // Don't use save_thread because it smashes G2 and we merely 2129 // want to save a copy 2130 __ mov(G2_thread, L7_thread_cache); 2131 2132 2133 // If we create an inner frame once is plenty 2134 // when we create it we must also save G2_thread 2135 bool inner_frame_created = false; 2136 2137 // dtrace method entry support 2138 { 2139 SkipIfEqual skip_if( 2140 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2141 // create inner frame 2142 __ save_frame(0); 2143 __ mov(G2_thread, L7_thread_cache); 2144 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2145 __ call_VM_leaf(L7_thread_cache, 2146 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2147 G2_thread, O1); 2148 __ restore(); 2149 } 2150 2151 // RedefineClasses() tracing support for obsolete method entry 2152 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { 2153 // create inner frame 2154 __ save_frame(0); 2155 __ mov(G2_thread, L7_thread_cache); 2156 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2157 __ call_VM_leaf(L7_thread_cache, 2158 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 2159 G2_thread, O1); 2160 __ restore(); 2161 } 2162 2163 // We are in the jni frame unless saved_frame is true in which case 2164 // we are in one frame deeper (the "inner" frame). If we are in the 2165 // "inner" frames the args are in the Iregs and if the jni frame then 2166 // they are in the Oregs. 2167 // If we ever need to go to the VM (for locking, jvmti) then 2168 // we will always be in the "inner" frame. 2169 2170 // Lock a synchronized method 2171 int lock_offset = -1; // Set if locked 2172 if (method->is_synchronized()) { 2173 Register Roop = O1; 2174 const Register L3_box = L3; 2175 2176 create_inner_frame(masm, &inner_frame_created); 2177 2178 __ ld_ptr(I1, 0, O1); 2179 Label done; 2180 2181 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size); 2182 __ add(FP, lock_offset+STACK_BIAS, L3_box); 2183 #ifdef ASSERT 2184 if (UseBiasedLocking) { 2185 // making the box point to itself will make it clear it went unused 2186 // but also be obviously invalid 2187 __ st_ptr(L3_box, L3_box, 0); 2188 } 2189 #endif // ASSERT 2190 // 2191 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch 2192 // 2193 __ compiler_lock_object(Roop, L1, L3_box, L2); 2194 __ br(Assembler::equal, false, Assembler::pt, done); 2195 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box); 2196 2197 2198 // None of the above fast optimizations worked so we have to get into the 2199 // slow case of monitor enter. Inline a special case of call_VM that 2200 // disallows any pending_exception. 2201 __ mov(Roop, O0); // Need oop in O0 2202 __ mov(L3_box, O1); 2203 2204 // Record last_Java_sp, in case the VM code releases the JVM lock. 
    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null(O0, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flush_windows();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native

  __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Use that pc we placed in O7 a while back as the current frame anchor

  __ set_last_Java_frame(SP, O7);

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  // We flushed the windows ages ago now mark them as flushed

  // mark windows as flushed
  __ set(JavaFrameAnchor::flushed, G3_scratch);

  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef _LP64
  AddressLiteral dest(method->native_function());
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
#else
  __ call(method->native_function(), relocInfo::runtime_call_type);
#endif
  __ delayed()->st(G3_scratch, flags);

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results. For int-types, we do any needed sign-extension
  // and move things into I0. The return value there will survive any VM
  // calls for blocking or unlocking. An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:   break;                 // Nothing to do!
  case T_FLOAT:  break;                 // Got it where we want it (unless slow-path)
  case T_DOUBLE: break;                 // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
  case T_LONG:
#ifndef _LP64
    __ mov(O1, I1);
#endif
    // Fall thru
  case T_OBJECT:                        // Really a handle
  case T_ARRAY:                         // Cannot de-handlize until after reclaiming jvm_lock
  case T_INT:
    __ mov(O0, I0);
    break;
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE  : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
  case T_CHAR  : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
  default:
    ShouldNotReachHere();
  }

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if(os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc
    //
    save_native_result(masm, ret_type, stack_slots);
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);
    __ bind(no_block);
  }

  // thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
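  // In outline, the transition just performed is (illustrative pseudo-code
  // only, not the VM's actual C++):
  //
  //   thread->state = _thread_in_native_trans;
  //   <StoreLoad barrier: membar or serialization-page write>
  //   if (SafepointSynchronize::state != _not_synchronized ||
  //       thread->suspend_flags != 0) {
  //     JavaThread::check_special_condition_for_native_trans(thread); // may block
  //   }
  //   thread->state = _thread_in_Java;   // the store emitted below
  //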

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());


  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
  __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
  __ delayed()->nop();

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call. Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation. Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM. Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit. Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);              // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null(O0, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set. The forward_exception routine expects to see the
    // exception in pending_exception and not in a register. Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
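    // The net effect of the exception juggling around the unlock is
    // (illustrative pseudo-code only):
    //
    //   ex = thread->pending_exception;     // saved in I2_ex_oop above
    //   thread->pending_exception = NULL;   // cleared before the leaf call
    //   complete_monitor_unlocking_C(obj, box);
    //   thread->pending_exception = ex;     // the store just below
    //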
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_oop_constant(JNIHandles::make_local(method()), O1);
    __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label L;
    __ addcc(G0, I0, G0);
    __ brx(Assembler::notZero, true, Assembler::pt, L);
    __ delayed()->ld_ptr(I0, 0, I0);
    __ mov(G0, I0);
    __ bind(L);
    __ verify_oop(I0);
  }

  // reset handle block
  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
  __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
  check_forward_pending_exception(masm, G3_scratch);


  // Return

#ifndef _LP64
  if (ret_type == T_LONG) {

    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
    __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);          // OR 64 bits into G1
  }
#endif

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);
  return nm;

}

#ifdef HAVE_DTRACE_H
// ---------------------------------------------------------------------------
// Generate a dtrace nmethod for a given signature. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// abi and then leaves nops at the position you would expect to call a native
// function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trace will cause a notification
// to dtrace.
//
// The probes are only able to take primitive types and java/lang/String as
// arguments. No other java types are allowed. Strings are converted to utf8
// strings so that from dtrace's point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings. (256 chars per string in the signature).
// So any java string larger than this is truncated.
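// For example (illustrative only): a probe for a method with the Java
// signature (String, long, float) is presented to dtrace roughly as
// (char* /* utf8, truncated to the limit above */, int64_t, int32_t),
// per the String->utf8, long/double->long and float->int conversions
// performed below.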
2513 2514 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 }; 2515 static bool offsets_initialized = false; 2516 2517 static VMRegPair reg64_to_VMRegPair(Register r) { 2518 VMRegPair ret; 2519 if (wordSize == 8) { 2520 ret.set2(r->as_VMReg()); 2521 } else { 2522 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg()); 2523 } 2524 return ret; 2525 } 2526 2527 2528 nmethod *SharedRuntime::generate_dtrace_nmethod( 2529 MacroAssembler *masm, methodHandle method) { 2530 2531 2532 // generate_dtrace_nmethod is guarded by a mutex so we are sure to 2533 // be single threaded in this method. 2534 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); 2535 2536 // Fill in the signature array, for the calling-convention call. 2537 int total_args_passed = method->size_of_parameters(); 2538 2539 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); 2540 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); 2541 2542 // The signature we are going to use for the trap that dtrace will see 2543 // java/lang/String is converted. We drop "this" and any other object 2544 // is converted to NULL. (A one-slot java/lang/Long object reference 2545 // is converted to a two-slot long, which is why we double the allocation). 2546 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); 2547 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); 2548 2549 int i=0; 2550 int total_strings = 0; 2551 int first_arg_to_pass = 0; 2552 int total_c_args = 0; 2553 2554 // Skip the receiver as dtrace doesn't want to see it 2555 if( !method->is_static() ) { 2556 in_sig_bt[i++] = T_OBJECT; 2557 first_arg_to_pass = 1; 2558 } 2559 2560 SignatureStream ss(method->signature()); 2561 for ( ; !ss.at_return_type(); ss.next()) { 2562 BasicType bt = ss.type(); 2563 in_sig_bt[i++] = bt; // Collect remaining bits of signature 2564 out_sig_bt[total_c_args++] = bt; 2565 if( bt == T_OBJECT) { 2566 symbolOop s = ss.as_symbol_or_null(); 2567 if (s == vmSymbols::java_lang_String()) { 2568 total_strings++; 2569 out_sig_bt[total_c_args-1] = T_ADDRESS; 2570 } else if (s == vmSymbols::java_lang_Boolean() || 2571 s == vmSymbols::java_lang_Byte()) { 2572 out_sig_bt[total_c_args-1] = T_BYTE; 2573 } else if (s == vmSymbols::java_lang_Character() || 2574 s == vmSymbols::java_lang_Short()) { 2575 out_sig_bt[total_c_args-1] = T_SHORT; 2576 } else if (s == vmSymbols::java_lang_Integer() || 2577 s == vmSymbols::java_lang_Float()) { 2578 out_sig_bt[total_c_args-1] = T_INT; 2579 } else if (s == vmSymbols::java_lang_Long() || 2580 s == vmSymbols::java_lang_Double()) { 2581 out_sig_bt[total_c_args-1] = T_LONG; 2582 out_sig_bt[total_c_args++] = T_VOID; 2583 } 2584 } else if ( bt == T_LONG || bt == T_DOUBLE ) { 2585 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2586 // We convert double to long 2587 out_sig_bt[total_c_args-1] = T_LONG; 2588 out_sig_bt[total_c_args++] = T_VOID; 2589 } else if ( bt == T_FLOAT) { 2590 // We convert float to int 2591 out_sig_bt[total_c_args-1] = T_INT; 2592 } 2593 } 2594 2595 assert(i==total_args_passed, "validly parsed signature"); 2596 2597 // Now get the compiled-Java layout as input arguments 2598 int comp_args_on_stack; 2599 comp_args_on_stack = SharedRuntime::java_calling_convention( 2600 in_sig_bt, in_regs, total_args_passed, false); 2601 2602 // We have received a description of where all the java arg are located 2603 // on entry to the wrapper. 
  // We need to convert these args to where
  // a native (non-JNI) function would expect them. To figure out
  // where they go we convert the java signature to a C signature and remove
  // T_VOID for any long/double we might have received.


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the 1st six register arguments). It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Plus a temp for possible conversion of float/double/long register args

  int conversion_temp = stack_slots;
  stack_slots += 2;


  // Now space for the string(s) we must convert

  int string_locs = stack_slots;
  stack_slots += total_strings *
                 (max_dtrace_string_size / VMRegImpl::stack_slot_size);

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | string[n]           |
  //      |---------------------| <- string_locs[n]
  //      | string[n-1]         |
  //      |---------------------| <- string_locs[n-1]
  //      |  ...                |
  //      | ...                 |
  //      |---------------------| <- string_locs[1]
  //      | string[0]           |
  //      |---------------------| <- string_locs[0]
  //      | temp                |
  //      |---------------------| <- conversion_temp
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
    __ cmp(temp_reg, G5_inline_cache_reg);
    __ brx(Assembler::equal, true, Assembler::pt, L);
    __ delayed()->nop();

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  // Frame is now completed as far as size and linkage.
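  // For concreteness (illustrative only, assuming the 256-char-per-string
  // limit quoted above and 4-byte stack slots): a probe with two String
  // arguments reserves 2 * (256 / 4) = 128 slots of utf8 conversion space
  // on top of the out-preserve area, the outgoing args and the 2-slot
  // conversion temp, before the whole figure is rounded up to a
  // 4*slots_per_word multiple.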
2701 2702 int frame_complete = ((intptr_t)__ pc()) - start; 2703 2704 #ifdef ASSERT 2705 bool reg_destroyed[RegisterImpl::number_of_registers]; 2706 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2707 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2708 reg_destroyed[r] = false; 2709 } 2710 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2711 freg_destroyed[f] = false; 2712 } 2713 2714 #endif /* ASSERT */ 2715 2716 VMRegPair zero; 2717 const Register g0 = G0; // without this we get a compiler warning (why??) 2718 zero.set2(g0->as_VMReg()); 2719 2720 int c_arg, j_arg; 2721 2722 Register conversion_off = noreg; 2723 2724 for (j_arg = first_arg_to_pass, c_arg = 0 ; 2725 j_arg < total_args_passed ; j_arg++, c_arg++ ) { 2726 2727 VMRegPair src = in_regs[j_arg]; 2728 VMRegPair dst = out_regs[c_arg]; 2729 2730 #ifdef ASSERT 2731 if (src.first()->is_Register()) { 2732 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!"); 2733 } else if (src.first()->is_FloatRegister()) { 2734 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding( 2735 FloatRegisterImpl::S)], "ack!"); 2736 } 2737 if (dst.first()->is_Register()) { 2738 reg_destroyed[dst.first()->as_Register()->encoding()] = true; 2739 } else if (dst.first()->is_FloatRegister()) { 2740 freg_destroyed[dst.first()->as_FloatRegister()->encoding( 2741 FloatRegisterImpl::S)] = true; 2742 } 2743 #endif /* ASSERT */ 2744 2745 switch (in_sig_bt[j_arg]) { 2746 case T_ARRAY: 2747 case T_OBJECT: 2748 { 2749 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT || 2750 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { 2751 // need to unbox a one-slot value 2752 Register in_reg = L0; 2753 Register tmp = L2; 2754 if ( src.first()->is_reg() ) { 2755 in_reg = src.first()->as_Register(); 2756 } else { 2757 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS), 2758 "must be"); 2759 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg); 2760 } 2761 // If the final destination is an acceptable register 2762 if ( dst.first()->is_reg() ) { 2763 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) { 2764 tmp = dst.first()->as_Register(); 2765 } 2766 } 2767 2768 Label skipUnbox; 2769 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) { 2770 __ mov(G0, tmp->successor()); 2771 } 2772 __ br_null(in_reg, true, Assembler::pn, skipUnbox); 2773 __ delayed()->mov(G0, tmp); 2774 2775 BasicType bt = out_sig_bt[c_arg]; 2776 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); 2777 switch (bt) { 2778 case T_BYTE: 2779 __ ldub(in_reg, box_offset, tmp); break; 2780 case T_SHORT: 2781 __ lduh(in_reg, box_offset, tmp); break; 2782 case T_INT: 2783 __ ld(in_reg, box_offset, tmp); break; 2784 case T_LONG: 2785 __ ld_long(in_reg, box_offset, tmp); break; 2786 default: ShouldNotReachHere(); 2787 } 2788 2789 __ bind(skipUnbox); 2790 // If tmp wasn't final destination copy to final destination 2791 if (tmp == L2) { 2792 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2); 2793 if (out_sig_bt[c_arg] == T_LONG) { 2794 long_move(masm, tmp_as_VM, dst); 2795 } else { 2796 move32_64(masm, tmp_as_VM, out_regs[c_arg]); 2797 } 2798 } 2799 if (out_sig_bt[c_arg] == T_LONG) { 2800 assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); 2801 ++c_arg; // move over the T_VOID to keep the loop indices in sync 2802 } 2803 } else if (out_sig_bt[c_arg] == T_ADDRESS) { 2804 Register s = 2805 src.first()->is_reg() ? 
src.first()->as_Register() : L2; 2806 Register d = 2807 dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2808 2809 // We store the oop now so that the conversion pass can reach 2810 // while in the inner frame. This will be the only store if 2811 // the oop is NULL. 2812 if (s != L2) { 2813 // src is register 2814 if (d != L2) { 2815 // dst is register 2816 __ mov(s, d); 2817 } else { 2818 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2819 STACK_BIAS), "must be"); 2820 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS); 2821 } 2822 } else { 2823 // src not a register 2824 assert(Assembler::is_simm13(reg2offset(src.first()) + 2825 STACK_BIAS), "must be"); 2826 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d); 2827 if (d == L2) { 2828 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2829 STACK_BIAS), "must be"); 2830 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS); 2831 } 2832 } 2833 } else if (out_sig_bt[c_arg] != T_VOID) { 2834 // Convert the arg to NULL 2835 if (dst.first()->is_reg()) { 2836 __ mov(G0, dst.first()->as_Register()); 2837 } else { 2838 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2839 STACK_BIAS), "must be"); 2840 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS); 2841 } 2842 } 2843 } 2844 break; 2845 case T_VOID: 2846 break; 2847 2848 case T_FLOAT: 2849 if (src.first()->is_stack()) { 2850 // Stack to stack/reg is simple 2851 move32_64(masm, src, dst); 2852 } else { 2853 if (dst.first()->is_reg()) { 2854 // freg -> reg 2855 int off = 2856 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2857 Register d = dst.first()->as_Register(); 2858 if (Assembler::is_simm13(off)) { 2859 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2860 SP, off); 2861 __ ld(SP, off, d); 2862 } else { 2863 if (conversion_off == noreg) { 2864 __ set(off, L6); 2865 conversion_off = L6; 2866 } 2867 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2868 SP, conversion_off); 2869 __ ld(SP, conversion_off , d); 2870 } 2871 } else { 2872 // freg -> mem 2873 int off = STACK_BIAS + reg2offset(dst.first()); 2874 if (Assembler::is_simm13(off)) { 2875 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2876 SP, off); 2877 } else { 2878 if (conversion_off == noreg) { 2879 __ set(off, L6); 2880 conversion_off = L6; 2881 } 2882 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2883 SP, conversion_off); 2884 } 2885 } 2886 } 2887 break; 2888 2889 case T_DOUBLE: 2890 assert( j_arg + 1 < total_args_passed && 2891 in_sig_bt[j_arg + 1] == T_VOID && 2892 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2893 if (src.first()->is_stack()) { 2894 // Stack to stack/reg is simple 2895 long_move(masm, src, dst); 2896 } else { 2897 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2898 2899 // Destination could be an odd reg on 32bit in which case 2900 // we can't load direct to the destination. 
2901 2902 if (!d->is_even() && wordSize == 4) { 2903 d = L2; 2904 } 2905 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2906 if (Assembler::is_simm13(off)) { 2907 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2908 SP, off); 2909 __ ld_long(SP, off, d); 2910 } else { 2911 if (conversion_off == noreg) { 2912 __ set(off, L6); 2913 conversion_off = L6; 2914 } 2915 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2916 SP, conversion_off); 2917 __ ld_long(SP, conversion_off, d); 2918 } 2919 if (d == L2) { 2920 long_move(masm, reg64_to_VMRegPair(L2), dst); 2921 } 2922 } 2923 break; 2924 2925 case T_LONG : 2926 // 32bit can't do a split move of something like g1 -> O0, O1 2927 // so use a memory temp 2928 if (src.is_single_phys_reg() && wordSize == 4) { 2929 Register tmp = L2; 2930 if (dst.first()->is_reg() && 2931 (wordSize == 8 || dst.first()->as_Register()->is_even())) { 2932 tmp = dst.first()->as_Register(); 2933 } 2934 2935 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2936 if (Assembler::is_simm13(off)) { 2937 __ stx(src.first()->as_Register(), SP, off); 2938 __ ld_long(SP, off, tmp); 2939 } else { 2940 if (conversion_off == noreg) { 2941 __ set(off, L6); 2942 conversion_off = L6; 2943 } 2944 __ stx(src.first()->as_Register(), SP, conversion_off); 2945 __ ld_long(SP, conversion_off, tmp); 2946 } 2947 2948 if (tmp == L2) { 2949 long_move(masm, reg64_to_VMRegPair(L2), dst); 2950 } 2951 } else { 2952 long_move(masm, src, dst); 2953 } 2954 break; 2955 2956 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2957 2958 default: 2959 move32_64(masm, src, dst); 2960 } 2961 } 2962 2963 2964 // If we have any strings we must store any register based arg to the stack 2965 // This includes any still live xmm registers too. 2966 2967 if (total_strings > 0 ) { 2968 2969 // protect all the arg registers 2970 __ save_frame(0); 2971 __ mov(G2_thread, L7_thread_cache); 2972 const Register L2_string_off = L2; 2973 2974 // Get first string offset 2975 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off); 2976 2977 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) { 2978 if (out_sig_bt[c_arg] == T_ADDRESS) { 2979 2980 VMRegPair dst = out_regs[c_arg]; 2981 const Register d = dst.first()->is_reg() ? 2982 dst.first()->as_Register()->after_save() : noreg; 2983 2984 // It's a string the oop and it was already copied to the out arg 2985 // position 2986 if (d != noreg) { 2987 __ mov(d, O0); 2988 } else { 2989 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 2990 "must be"); 2991 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0); 2992 } 2993 Label skip; 2994 2995 __ br_null(O0, false, Assembler::pn, skip); 2996 __ delayed()->add(FP, L2_string_off, O1); 2997 2998 if (d != noreg) { 2999 __ mov(O1, d); 3000 } else { 3001 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 3002 "must be"); 3003 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS); 3004 } 3005 3006 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf), 3007 relocInfo::runtime_call_type); 3008 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off); 3009 3010 __ bind(skip); 3011 3012 } 3013 3014 } 3015 __ mov(L7_thread_cache, G2_thread); 3016 __ restore(); 3017 3018 } 3019 3020 3021 // Ok now we are done. 

  // If we have any strings we must store any register based arg to the stack
  // This includes any still live floating point argument registers too.

  if (total_strings > 0) {

    // protect all the arg registers
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    const Register L2_string_off = L2;

    // Get first string offset
    __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);

    for (c_arg = 0; c_arg < total_c_args; c_arg++) {
      if (out_sig_bt[c_arg] == T_ADDRESS) {

        VMRegPair dst = out_regs[c_arg];
        const Register d = dst.first()->is_reg() ?
          dst.first()->as_Register()->after_save() : noreg;

        // It's a string; the oop was already copied to the out arg
        // position
        if (d != noreg) {
          __ mov(d, O0);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
        }
        Label skip;

        __ br_null(O0, false, Assembler::pn, skip);
        __ delayed()->add(FP, L2_string_off, O1);

        if (d != noreg) {
          __ mov(O1, d);
        } else {
          assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
                 "must be");
          __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
        }

        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
                relocInfo::runtime_call_type);
        __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);

        __ bind(skip);

      }

    }
    __ mov(L7_thread_cache, G2_thread);
    __ restore();

  }


  // Ok now we are done. Need to place the nop that dtrace wants in order to
  // patch in the trap

  int patch_offset = ((intptr_t)__ pc()) - start;

  __ nop();


  // Return

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_dtrace_nmethod(
      method, masm->code(), vep_offset, patch_offset, frame_complete,
      stack_slots / VMRegImpl::slots_per_word);
  return nm;

}

#endif // HAVE_DTRACE_H

// this function returns the adjustment (in number of words) applied to a c2i
// adapter activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
  return round_to(diff, WordsPerLong);
}
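
// Worked example (illustrative only, with hypothetical input values): a
// callee with 2 parameters and 5 locals needs 3 extra stack-element words
// beyond what the caller pushed; with one-word stack elements this is
// round_to(3, WordsPerLong), e.g. 4 words where WordsPerLong == 2:
//
//   int adjust = Deoptimization::last_frame_adjust(2, 5);
//   // adjust == round_to((5 - 2) * Interpreter::stackElementWords(), WordsPerLong)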

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register        G3pcs              = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;         // Array of frame sizes (input)
  Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // current frame size (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);             // get the current frame size
  __ sub(G0, O7frame_size, O7frame_size);          // negate it for the save below
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                         // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero);
#endif
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);                  // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5; it could hold a valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs              = G3_scratch;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;
  Register        O4array_size       = O4;
  Label           loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of stack
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs
  // [sp_adjustment, frame_size] for each frame we create and keep up the
  // illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);   // allocate an interpreter frame

  __ tst(O4array_size);
  __ br(Assembler::notZero, false, Assembler::pn, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);      // load final frame's new pc

}
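
// A C-level sketch (illustrative only; return_pc_of is a hypothetical helper
// standing in for the I7 store done in gen_new_frame) of how the UnrollBlock
// arrays drive the loop above:
//
//   for (int i = 0; i < number_of_frames; i++) {
//     sp -= frame_sizes[i] / wordSize;   // save(SP, -frame_size, SP)
//     return_pc_of(sp) = frame_pcs[i];   // ld_ptr(G3pcs, 0, I7)
//   }
//   // one extra pc entry remains and is loaded last into O7: the address
//   // at which execution resumes once the frames are laid out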

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;   // Extra slop space for more verify code
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  FloatRegister   Freturn0           = F0;
  Register        Greturn1           = G1;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        L0deopt_mode       = L0;
  Register        G4deopt_mode       = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server). O0 contains the exception oop and O7 contains the original
  // exception pc. So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull(Oexception, false, Assembler::pt, has_exception);
    __ delayed()->nop();
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
    __ delayed()->nop();
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp(G4deopt_mode, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::notEqual, false, Assembler::pt, noException);
  __ delayed()->nop();

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  if (!TieredCompilation) {
    // 32-bit 1-register longs return longs in G1
    __ stx(Greturn1, saved_Greturn1_addr);
  }
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  // In tiered we never use C2 to compile methods returning longs so
  // the result is where we expect it already.

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long. In the tiered world there is
  // a mismatch between how C1- and C2-compiled code return longs, so
  // currently compilation of methods which return longs is disabled
  // for C2 and so is this code. Eventually C1 and C2 will do the
  // same thing for longs in the tiered world.
  if (!TieredCompilation) {
    Label not_long;
    __ cmp(O0, T_LONG);
    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
    __ delayed()->nop();
    __ ldd(saved_Greturn1_addr, I0);
    __ bind(not_long);
  }
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
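
// Illustrative summary (a sketch, not code from this file): the blob just
// created exposes several entry points by offset, reachable through the
// accessors on DeoptimizationBlob, roughly:
//
//   _deopt_blob->unpack();                        // offset 0: normal deopt
//   _deopt_blob->unpack_with_exception();         // exception_offset
//   _deopt_blob->unpack_with_reexecution();       // reexecute_offset
//   _deopt_blob->unpack_with_exception_in_tls();  // exception_in_tls_offset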

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3);   // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2
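
// Illustrative only (hypothetical accessor and emission details; the actual
// call is emitted by the C2 back end): compiled code reaches the blob above
// via a call whose single argument, in O0, packs the trap reason and the
// index of any unloaded class. Roughly:
//
//   // address utb = SharedRuntime::uncommon_trap_blob()->entry_point();
//   // ... emit: move trap_request into O0, then call utb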

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's to %i's and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  // If this blob handles a poll at a return site, do a "restore" first so
  // that the caller's frame is the one being processed
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}
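
// Illustrative only: two variants of this blob are created in generate_stubs()
// below, differing in the cause_return flag. A sketch of how a caller would
// pick between them (at_poll_return is a hypothetical name):
//
//   SafepointBlob* blob = at_poll_return ? _polling_page_return_handler_blob
//                                        : _polling_page_safepoint_handler_blob;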

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call site
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  // get the returned methodOop

  __ get_vm_result(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // CodeBlob frame size is in words, so frame_size_words is correct here
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}

void SharedRuntime::generate_stubs() {

  _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
                                                               "wrong_method_stub");

  _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
                                                               "ic_miss_stub");

  _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
                                                               "resolve_opt_virtual_call");

  _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
                                                               "resolve_virtual_call");

  _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
                                                               "resolve_static_call");

  _polling_page_safepoint_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                          SafepointSynchronize::handle_polling_page_exception), false);

  _polling_page_return_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                          SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}
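
// A minimal sketch (illustrative only; the exact init call site lives
// elsewhere in the VM startup code): generate_stubs() runs once during
// startup, before any Java code executes, so every resolve, safepoint, and
// deopt blob above is available by the time compiled code can reach them.
//
//   void sharedRuntime_init() {        // hypothetical shape of the call site
//     SharedRuntime::generate_stubs();
//   }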