#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)sharedRuntime_sparc.cpp 1.52 07/08/29 13:42:18 JVM"
#endif
/*
 * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires that we save
  // them before the window-save (as then they become %i registers and get
  // their heads chopped off on interrupt). We have to save some %g registers
  // here as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile-time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
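
// Illustrative usage sketch (comments only, not compiled into the VM): the
// blob generators later in this file bracket a VM call with this class in
// roughly the following shape, assuming a MacroAssembler* masm and an
// OopMapSet* oop_maps are in scope (pc_offset_of_call is a placeholder name):
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set up arguments and call into the runtime ...
//   oop_maps->add_gc_map(pc_offset_of_call, map);
//   RegisterSaver::restore_live_registers(masm);
//
// The OopMap returned by save_live_registers is what lets a GC or a
// deoptimization that happens inside the runtime call find (and update)
// oops sitting in the saved registers.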

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers
  int offset = d00_offset;
  for( int i=0; i<64; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    if (true) {
      map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    }
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}
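
// A note on the slot arithmetic above: OopMap locations are in 32-bit VMReg
// stack slots while the enum offsets are byte offsets, hence the recurring
// (offset >> 2) conversions. As an illustrative example, an 8-byte save slot
// at byte offset 40 covers the two adjacent 32-bit slots 10 and 11; on the
// 32-bit build the +4 (debug_offset) adjustment selects the half of the
// 8-byte slot that actually holds the named 32-bit register value.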


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<64; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
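
// Illustrative example of the bias: a VMReg with reg2stack() == 0 does not
// live at byte offset 0 from SP but at the first slot past the area covered
// by out_preserve_stack_slots() (the register-window save area), i.e. at
// (0 + out_preserve_stack_slots()) * VMRegImpl::stack_slot_size bytes.
// Note that STACK_BIAS must still be added separately by callers on the
// 64-bit build.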

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot
// past the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to
// RegisterImpl::number_of_registers) are the 64-bit integer registers.
// Values 64-95 are the (32-bit only) float registers. Each 32-bit quantity is
// given its own number, so the integer registers (in either 32- or 64-bit
// builds) use 2 numbers. For example, there is an O0-low and an O0-high.
// Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's
// unused (a 32-bit value was passed). If both are VMRegImpl::Bad(), it means
// no value was passed (used as a placeholder for the other half of longs and
// doubles in the 64-bit build). Otherwise regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling
// convention). Sparc never passes a value in regs[].second() but not
// regs[].first() (i.e., never regs[].first() == VMRegImpl::Bad() &&
// regs[].second() != VMRegImpl::Bad()), nor unrelated values in the same
// VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. Values are
// packed in the registers. There is no backing varargs store for values in
// registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack. Then pack the first 8 float args
  // into F0-F7, extras spill to the stack. Then pad all register sets to
  // align. Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
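  //
  // Illustrative example (comments only): for a signature (int, long, double)
  // on the 64-bit build, the int lands in I0/O0, the long takes all 64 bits
  // of I1/O1, and the double takes the aligned float pair F0:F1; no stack
  // slots are needed. On the 32-bit, non-tiered COMPILER2 build the same
  // long would instead be passed in G1 (see the T_LONG case below).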
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args. See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt   = 0;
  int flt_reg_cnt   = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                           stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
#ifdef COMPILER2
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
      // spare and available. This convention isn't used by the Sparc ABI or
      // anywhere else. If we're tiered then we don't use G-regs because c1
      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
        } else {
          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
        }
        int_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}

// Helper class mostly to avoid passing masm everywhere, and handle store
// displacement overflow logic for LP64
class AdapterGenerator {
  MacroAssembler *masm;
#ifdef _LP64
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }
#endif // _LP64

  void patch_callers_callsite();
  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  }

#ifdef _LP64
  // On _LP64 argument slot values are loaded first into a register
  // because they might not fit into displacement.
  Register arg_slot(const int st_off);
  Register next_arg_slot(const int st_off);
#else
  int arg_slot(const int st_off)      { return arg_offset(st_off); }
  int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
#endif // _LP64

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};
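
// Why the Rdisp indirection above: SPARC load/store immediates are signed
// 13-bit (simm13), so on LP64, where interpreter stack-element offsets plus
// STACK_BIAS can exceed that range, the displacement is materialized into a
// register first. Illustrative shape of the emitted code (hypothetical
// offset value):
//
//   __ set(arg_offset(st_off), Rdisp);   // offset too large for an immediate
//   __ st_ptr(src, base, Rdisp);         // use register+register addressing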


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs. The list is:
  //   G1: 1st Long arg (32bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32bit build);
  //   G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}
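
// Net effect of patch_callers_callsite: the first compiled-to-interpreted
// transition through a static call site pays for one leaf call into
// SharedRuntime::fixup_callers_callsite, which repoints the caller's call
// instruction at the callee's compiled entry; later calls then bypass this
// adapter entirely. Only the G registers (plus the 32-bit long args living
// in G1/G4) need hand-saving around that call, because the register-window
// save does not preserve globals.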

void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                                   Register scratch) {
  if (TaggedStackInterpreter) {
    int tag_off = st_off + Interpreter::tag_offset_in_bytes();
#ifdef _LP64
    Register tag_slot = Rdisp;
    __ set(tag_off, tag_slot);
#else
    int tag_slot = tag_off;
#endif // _LP64
    // have to store zero because local slots can be reused (rats!)
    if (t == frame::TagValue) {
      __ st_ptr(G0, base, tag_slot);
    } else if (t == frame::TagCategory2) {
      __ st_ptr(G0, base, tag_slot);
      int next_tag_off = st_off - Interpreter::stackElementSize() +
                         Interpreter::tag_offset_in_bytes();
#ifdef _LP64
      __ set(next_tag_off, tag_slot);
#else
      tag_slot = next_tag_off;
#endif // _LP64
      __ st_ptr(G0, base, tag_slot);
    } else {
      __ mov(t, scratch);
      __ st_ptr(scratch, base, tag_slot);
    }
  }
}

#ifdef _LP64
Register AdapterGenerator::arg_slot(const int st_off) {
  __ set( arg_offset(st_off), Rdisp);
  return Rdisp;
}

Register AdapterGenerator::next_arg_slot(const int st_off){
  __ set( next_arg_offset(st_off), Rdisp);
  return Rdisp;
}
#endif // _LP64

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     );  // lo bits
    __ stw(r             , base, next_arg_slot(st_off));  // hi bits
  }
#endif // COMPILER2
#endif // _LP64
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagReference, base, st_off, r);
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, r);
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}
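
// Illustrative layout note for the helpers above: on the 32-bit build a long
// occupies two interpreter stack words, with the high 32 bits stored at
// next_arg_slot (the lower address) and the low 32 bits at arg_slot, which
// matches SPARC's big-endian two-word representation. store_c2i_long
// produces that shape from either a single 64-bit G register (COMPILER2) or
// an O-register pair (the non-COMPILER2 case).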

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize();
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize();

  Register base = SP;

#ifdef _LP64
  // In the 64bit build because of wider slots and STACKBIAS we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as
  // temporary displacement.
  if (!__ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
#endif // _LP64
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}
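
// Frame-sizing note for gen_c2i_adapter (illustrative, not exact for every
// configuration): the adapter grows the compiled frame by
//   round_to(total_args_passed * Interpreter::stackElementSize()
//            + (frame::varargs_offset - frame::register_save_words) * wordSize,
//            2 * wordSize)
// i.e. the interpreter argument area plus the interpreter's varargs area,
// rounded up so the new SP keeps the required 2-word stack alignment.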

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // O0             - Flag telling us to restore SP from O5
  // O4_args        - Pointer to interpreter's args
  // O5             - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // O4 is about to get loaded up with compiled callee's args
  __ sub(Gargs, BytesPerWord, Gargs);

#ifdef ASSERT
  {
    // on entry OsavedSP and SP should be equal
    Label ok;
    __ cmp(O5_savedSP, SP);
    __ br(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("I5_savedSP not set");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |    longs     |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
#ifdef _LP64
    set_Rdisp(G1_scratch);
#endif // _LP64

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        Register slot = (sig_bt[i]==T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle. Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        Register slot = (sig_bt[i]==T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, st_off);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, st_off);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset()));
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null(L0, false, Assembler::pt, loop);
    __ delayed()->nop();

    __ restore();
  }


  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}
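
// A note on the staging registers used in the shuffle above: arguments bound
// for compiled *stack* slots are routed through F8/F9 (and 32-bit longs
// through G1/G4 with G3 as scratch) rather than through integer temps,
// because nearly every integer register already carries live state at this
// point, while these float registers are free here and make convenient
// stack-to-stack copy vehicles.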

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp   = L0;   // another scratch register
#else
    Register R_temp   = G1;   // another scratch register
#endif

    Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);

}
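
// The three entry points packaged above are shared per argument-signature
// rather than per method: AdapterHandlerLibrary keys adapter blobs by a
// fingerprint of the argument types, so every method with the same
// (sig_bt, regs) layout reuses this one blob. i2c_entry is reached from
// interpreted callers, c2i_unverified_entry from compiled inline-cache sites
// (it performs the receiver klass check above), and c2i_entry once the
// receiver has already been verified.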

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the C calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

    // Return the number of VMReg stack_slots needed for the args.
    // This value does not include an abi space (like register window
    // save area).

    // The native convention is V8 if !LP64.
    // The LP64 convention is the V9 convention, which is slightly more sane.

    // We return the amount of VMReg stack slots we need to reserve for all
    // the arguments NOT counting out_preserve_stack_slots. Since we always
    // have space for storing at least 6 registers to memory we start with that.
    // See int_stk_helper for a further discussion.
    int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
    // V9 convention: All things "as-if" on double-wide stack slots.
    // Hoist any int/ptr/long's in the first 6 to int regs.
    // Hoist any flt/dbl's in the first 16 dbl regs.
    int j = 0;                  // Count of actual args, not HALVES
    for( int i=0; i<total_args_passed; i++, j++ ) {
      switch( sig_bt[i] ) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
      case T_SHORT:
        regs[i].set1( int_stk_helper( j ) ); break;
      case T_LONG:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_OBJECT:
        regs[i].set2( int_stk_helper( j ) );
        break;
      case T_FLOAT:
        if ( j < 16 ) {
          // V9ism: floats go in ODD registers
          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
        } else {
          // V9ism: floats go in ODD stack slot
          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
        }
        break;
      case T_DOUBLE:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
        if ( j < 16 ) {
          // V9ism: doubles go in EVEN/ODD regs
          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i].set2(VMRegImpl::stack2reg(j<<1));
        }
        break;
      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off = regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off = regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }

#else // _LP64
    // V8 convention: first 6 things in O-regs, rest on stack.
    // Alignment is willy-nilly.
    for( int i=0; i<total_args_passed; i++ ) {
      switch( sig_bt[i] ) {
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_FLOAT:
      case T_INT:
      case T_OBJECT:
      case T_SHORT:
        regs[i].set1( int_stk_helper( i ) );
        break;
      case T_DOUBLE:
      case T_LONG:
        assert( sig_bt[i+1] == T_VOID, "expecting half" );
        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
        break;
      case T_VOID: regs[i].set_bad(); break;
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off = regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off = regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
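
// Illustrative V9 numbering from the rules above: argument slot j maps a
// float to F(1 + 2*j) and a double to the even/odd pair starting at F(2*j).
// So for a native signature (double, float, int) the double lands in the
// F0:F1 pair, the float in F3, and the int in O2; every argument consumes a
// full double-wide slot whether or not it needs all 64 bits.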


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check for and forward a pending exception.  The thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call,
// there is no exception handler.  We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (SPARC ABI) even though Java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits. So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}
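
// Illustrative difference between the two movers above: for a stack-to-stack
// copy, simple_move32 emits ld/st (a 32-bit store), while move32_64 emits
// ld/st_ptr, widening the value to a full 64-bit stack item as the V9 ABI
// expects. On the 32-bit build st_ptr is itself a 32-bit store, so the two
// routines degenerate to the same code there.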
L5 : dst.first()->as_Register(); 1513 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle); 1514 __ ld_ptr(rHandle, 0, L4); 1515 #ifdef _LP64 1516 __ movr( Assembler::rc_z, L4, G0, rHandle ); 1517 #else 1518 __ tst( L4 ); 1519 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle ); 1520 #endif 1521 if (dst.first()->is_stack()) { 1522 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); 1523 } 1524 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1525 if (is_receiver) { 1526 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 1527 } 1528 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 1529 } else { 1530 // Oop is in an input register pass we must flush it to the stack 1531 const Register rOop = src.first()->as_Register(); 1532 const Register rHandle = L5; 1533 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset; 1534 int offset = oop_slot*VMRegImpl::stack_slot_size; 1535 Label skip; 1536 __ st_ptr(rOop, SP, offset + STACK_BIAS); 1537 if (is_receiver) { 1538 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size; 1539 } 1540 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1541 __ add(SP, offset + STACK_BIAS, rHandle); 1542 #ifdef _LP64 1543 __ movr( Assembler::rc_z, rOop, G0, rHandle ); 1544 #else 1545 __ tst( rOop ); 1546 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle ); 1547 #endif 1548 1549 if (dst.first()->is_stack()) { 1550 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); 1551 } else { 1552 __ mov(rHandle, dst.first()->as_Register()); 1553 } 1554 } 1555 } 1556 1557 // A float arg may have to do float reg int reg conversion 1558 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1559 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); 1560 1561 if (src.first()->is_stack()) { 1562 if (dst.first()->is_stack()) { 1563 // stack to stack the easiest of the bunch 1564 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5); 1565 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS); 1566 } else { 1567 // stack to reg 1568 if (dst.first()->is_Register()) { 1569 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1570 } else { 1571 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1572 } 1573 } 1574 } else if (dst.first()->is_stack()) { 1575 // reg to stack 1576 if (src.first()->is_Register()) { 1577 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS); 1578 } else { 1579 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS); 1580 } 1581 } else { 1582 // reg to reg 1583 if (src.first()->is_Register()) { 1584 if (dst.first()->is_Register()) { 1585 // gpr -> gpr 1586 __ mov(src.first()->as_Register(), dst.first()->as_Register()); 1587 } else { 1588 // gpr -> fpr 1589 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS); 1590 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister()); 1591 } 1592 } else if (dst.first()->is_Register()) { 1593 // fpr -> gpr 1594 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS); 1595 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register()); 1596 } else { 1597 // fpr -> fpr 1598 // In theory these overlap but the ordering is such that this is likely a nop 1599 if ( src.first() != dst.first()) { 1600 __ 
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means high address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is the low address, not the msb, for stack slots,
      // and lo is the "real" register for register pairs.

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // dst.lo now has the LSW in its correct low-order position; shift the
      // MSW half up into place and OR the two halves together.
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  // 1: a single physical register
  // 2: two physical registers (v8)
  // 3: a physical reg [lo] and a stack slot [hi] (v8)
  // 4: two stack slots
  //
  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?), never a
  // register [lo] and a stack slot [hi].

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      // ought to be a way to do this where if alignment is ok we use ldd/std when possible
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
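        // (Added illustration, not original code: with 4-byte stack slots,
        // a destination offset of 0x1c fails the test below, so the double
        // is stored as two 32-bit singles, e.g.
        //     stf  %f2, [SP + 0x1c + STACK_BIAS]   ! first()  / high word
        //     stf  %f3, [SP + 0x20 + STACK_BIAS]   ! second() / low word
        // whereas an 8-byte aligned offset such as 0x18 would permit the
        // single 64-bit store in the else branch.  The %f2:%f3 pair is
        // purely illustrative.)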
        if (reg2offset(dst.first()) & 0x7) {
          // No: store it as two 32-bit singles
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below.
    // Don't use save_thread because it smashes G2 and we merely want to save
    // a copy.
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int total_in_args,
                                                int comp_args_on_stack, // in VMRegStackSlots
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments.
The only thing we need an 1818 // oopMap for is if the call is static 1819 // 1820 // An OopMap for lock (and class if static), and one for the VM call itself 1821 OopMapSet *oop_maps = new OopMapSet(); 1822 intptr_t start = (intptr_t)__ pc(); 1823 1824 // First thing make an ic check to see if we should even be here 1825 { 1826 Label L; 1827 const Register temp_reg = G3_scratch; 1828 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); 1829 __ verify_oop(O0); 1830 __ load_klass(O0, temp_reg); 1831 __ cmp(temp_reg, G5_inline_cache_reg); 1832 __ brx(Assembler::equal, true, Assembler::pt, L); 1833 __ delayed()->nop(); 1834 1835 __ jump_to(ic_miss, 0); 1836 __ delayed()->nop(); 1837 __ align(CodeEntryAlignment); 1838 __ bind(L); 1839 } 1840 1841 int vep_offset = ((intptr_t)__ pc()) - start; 1842 1843 #ifdef COMPILER1 1844 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) { 1845 // Object.hashCode can pull the hashCode from the header word 1846 // instead of doing a full VM transition once it's been computed. 1847 // Since hashCode is usually polymorphic at call sites we can't do 1848 // this optimization at the call site without a lot of work. 1849 Label slowCase; 1850 Register receiver = O0; 1851 Register result = O0; 1852 Register header = G3_scratch; 1853 Register hash = G3_scratch; // overwrite header value with hash value 1854 Register mask = G1; // to get hash field from header 1855 1856 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked. 1857 // We depend on hash_mask being at most 32 bits and avoid the use of 1858 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit 1859 // vm: see markOop.hpp. 1860 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header); 1861 __ sethi(markOopDesc::hash_mask, mask); 1862 __ btst(markOopDesc::unlocked_value, header); 1863 __ br(Assembler::zero, false, Assembler::pn, slowCase); 1864 if (UseBiasedLocking) { 1865 // Check if biased and fall through to runtime if so 1866 __ delayed()->nop(); 1867 __ btst(markOopDesc::biased_lock_bit_in_place, header); 1868 __ br(Assembler::notZero, false, Assembler::pn, slowCase); 1869 } 1870 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask); 1871 1872 // Check for a valid (non-zero) hash code and get its value. 1873 #ifdef _LP64 1874 __ srlx(header, markOopDesc::hash_shift, hash); 1875 #else 1876 __ srl(header, markOopDesc::hash_shift, hash); 1877 #endif 1878 __ andcc(hash, mask, hash); 1879 __ br(Assembler::equal, false, Assembler::pn, slowCase); 1880 __ delayed()->nop(); 1881 1882 // leaf return. 1883 __ retl(); 1884 __ delayed()->mov(hash, result); 1885 __ bind(slowCase); 1886 } 1887 #endif // COMPILER1 1888 1889 1890 // We have received a description of where all the java arg are located 1891 // on entry to the wrapper. We need to convert these args to where 1892 // the jni function will expect them. 
  // To figure out where they go we convert the java signature to a C
  // signature by inserting the hidden arguments as arg[0] and possibly
  // arg[1] (the class mirror for a static method).

  int total_c_args = total_in_args + 1;
  if (method->is_static()) {
    total_c_args++;
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  for (int i = 0; i < total_in_args ; i++ ) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the first six register arguments).  It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers.  We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always allocate six words for storing down these oops.  This allows
  // us to simply record the base and use the Ireg number to decide which
  // slot to use.  (Note that the reg number is the inbound number, not the
  // outbound number.)
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
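  // (Added worked example of the signature rewrite above, not in the
  //  original: for a static method `int m(Object o, long l)` the loop
  //  produces
  //     out_sig_bt = { T_ADDRESS,        // JNIEnv*
  //                    T_OBJECT,         // class mirror (static method)
  //                    T_OBJECT,         // o
  //                    T_LONG, T_VOID }  // l and its second half
  //  so total_c_args == total_in_args + 2 == 5.)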
1934 1935 // First count the abi requirement plus all of the outgoing args 1936 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1937 1938 // Now the space for the inbound oop handle area 1939 1940 int oop_handle_offset = stack_slots; 1941 stack_slots += 6*VMRegImpl::slots_per_word; 1942 1943 // Now any space we need for handlizing a klass if static method 1944 1945 int oop_temp_slot_offset = 0; 1946 int klass_slot_offset = 0; 1947 int klass_offset = -1; 1948 int lock_slot_offset = 0; 1949 bool is_static = false; 1950 1951 if (method->is_static()) { 1952 klass_slot_offset = stack_slots; 1953 stack_slots += VMRegImpl::slots_per_word; 1954 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1955 is_static = true; 1956 } 1957 1958 // Plus a lock if needed 1959 1960 if (method->is_synchronized()) { 1961 lock_slot_offset = stack_slots; 1962 stack_slots += VMRegImpl::slots_per_word; 1963 } 1964 1965 // Now a place to save return value or as a temporary for any gpr -> fpr moves 1966 stack_slots += 2; 1967 1968 // Ok The space we have allocated will look like: 1969 // 1970 // 1971 // FP-> | | 1972 // |---------------------| 1973 // | 2 slots for moves | 1974 // |---------------------| 1975 // | lock box (if sync) | 1976 // |---------------------| <- lock_slot_offset 1977 // | klass (if static) | 1978 // |---------------------| <- klass_slot_offset 1979 // | oopHandle area | 1980 // |---------------------| <- oop_handle_offset 1981 // | outbound memory | 1982 // | based arguments | 1983 // | | 1984 // |---------------------| 1985 // | vararg area | 1986 // |---------------------| 1987 // | | 1988 // SP-> | out_preserved_slots | 1989 // 1990 // 1991 1992 1993 // Now compute actual number of stack words we need rounding to make 1994 // stack properly aligned. 1995 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); 1996 1997 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1998 1999 // Generate stack overflow check before creating frame 2000 __ generate_stack_overflow_check(stack_size); 2001 2002 // Generate a new frame for the wrapper. 2003 __ save(SP, -stack_size, SP); 2004 2005 int frame_complete = ((intptr_t)__ pc()) - start; 2006 2007 __ verify_thread(); 2008 2009 2010 // 2011 // We immediately shuffle the arguments so that any vm call we have to 2012 // make from here on out (sync slow path, jvmti, etc.) we will have 2013 // captured the oops from our caller and have a valid oopMap for 2014 // them. 2015 2016 // ----------------- 2017 // The Grand Shuffle 2018 // 2019 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2020 // (derived from JavaThread* which is in L7_thread_cache) and, if static, 2021 // the class mirror instead of a receiver. This pretty much guarantees that 2022 // register layout will not match. We ignore these extra arguments during 2023 // the shuffle. The shuffle is described by the two calling convention 2024 // vectors we have in our possession. We simply walk the java vector to 2025 // get the source locations and the c vector to get the destinations. 2026 // Because we have a new window and the argument registers are completely 2027 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about 2028 // here. 2029 2030 // This is a trick. We double the stack slots so we can claim 2031 // the oops in the caller's frame. 
Since we are sure to have 2032 // more args than the caller doubling is enough to make 2033 // sure we can capture all the incoming oop args from the 2034 // caller. 2035 // 2036 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2037 int c_arg = total_c_args - 1; 2038 // Record sp-based slot for receiver on stack for non-static methods 2039 int receiver_offset = -1; 2040 2041 // We move the arguments backward because the floating point registers 2042 // destination will always be to a register with a greater or equal register 2043 // number or the stack. 2044 2045 #ifdef ASSERT 2046 bool reg_destroyed[RegisterImpl::number_of_registers]; 2047 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2048 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2049 reg_destroyed[r] = false; 2050 } 2051 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2052 freg_destroyed[f] = false; 2053 } 2054 2055 #endif /* ASSERT */ 2056 2057 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) { 2058 2059 #ifdef ASSERT 2060 if (in_regs[i].first()->is_Register()) { 2061 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!"); 2062 } else if (in_regs[i].first()->is_FloatRegister()) { 2063 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!"); 2064 } 2065 if (out_regs[c_arg].first()->is_Register()) { 2066 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2067 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 2068 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true; 2069 } 2070 #endif /* ASSERT */ 2071 2072 switch (in_sig_bt[i]) { 2073 case T_ARRAY: 2074 case T_OBJECT: 2075 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2076 ((i == 0) && (!is_static)), 2077 &receiver_offset); 2078 break; 2079 case T_VOID: 2080 break; 2081 2082 case T_FLOAT: 2083 float_move(masm, in_regs[i], out_regs[c_arg]); 2084 break; 2085 2086 case T_DOUBLE: 2087 assert( i + 1 < total_in_args && 2088 in_sig_bt[i + 1] == T_VOID && 2089 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2090 double_move(masm, in_regs[i], out_regs[c_arg]); 2091 break; 2092 2093 case T_LONG : 2094 long_move(masm, in_regs[i], out_regs[c_arg]); 2095 break; 2096 2097 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2098 2099 default: 2100 move32_64(masm, in_regs[i], out_regs[c_arg]); 2101 } 2102 } 2103 2104 // Pre-load a static method's oop into O1. Used both by locking code and 2105 // the normal JNI call code. 2106 if (method->is_static()) { 2107 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1); 2108 2109 // Now handlize the static class mirror in O1. It's known not-null. 2110 __ st_ptr(O1, SP, klass_offset + STACK_BIAS); 2111 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2112 __ add(SP, klass_offset + STACK_BIAS, O1); 2113 } 2114 2115 2116 const Register L6_handle = L6; 2117 2118 if (method->is_synchronized()) { 2119 __ mov(O1, L6_handle); 2120 } 2121 2122 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs 2123 // except O6/O7. So if we must call out we must push a new frame. We immediately 2124 // push a new frame and flush the windows. 
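  // (Added note, not in the original source: SPARC has no instruction that
  //  reads the pc directly, so the 64-bit path below materializes it with a
  //  call to the next instruction, which leaves its own address in O7:
  //      call  .+8      ! O7 <- pc of the call itself
  //      nop            ! delay slot
  //  The 32-bit path gets the same effect from load_pc_address.)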
2125 2126 #ifdef _LP64 2127 intptr_t thepc = (intptr_t) __ pc(); 2128 { 2129 address here = __ pc(); 2130 // Call the next instruction 2131 __ call(here + 8, relocInfo::none); 2132 __ delayed()->nop(); 2133 } 2134 #else 2135 intptr_t thepc = __ load_pc_address(O7, 0); 2136 #endif /* _LP64 */ 2137 2138 // We use the same pc/oopMap repeatedly when we call out 2139 oop_maps->add_gc_map(thepc - start, map); 2140 2141 // O7 now has the pc loaded that we will use when we finally call to native. 2142 2143 // Save thread in L7; it crosses a bunch of VM calls below 2144 // Don't use save_thread because it smashes G2 and we merely 2145 // want to save a copy 2146 __ mov(G2_thread, L7_thread_cache); 2147 2148 2149 // If we create an inner frame once is plenty 2150 // when we create it we must also save G2_thread 2151 bool inner_frame_created = false; 2152 2153 // dtrace method entry support 2154 { 2155 SkipIfEqual skip_if( 2156 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2157 // create inner frame 2158 __ save_frame(0); 2159 __ mov(G2_thread, L7_thread_cache); 2160 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2161 __ call_VM_leaf(L7_thread_cache, 2162 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2163 G2_thread, O1); 2164 __ restore(); 2165 } 2166 2167 // RedefineClasses() tracing support for obsolete method entry 2168 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { 2169 // create inner frame 2170 __ save_frame(0); 2171 __ mov(G2_thread, L7_thread_cache); 2172 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2173 __ call_VM_leaf(L7_thread_cache, 2174 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 2175 G2_thread, O1); 2176 __ restore(); 2177 } 2178 2179 // We are in the jni frame unless saved_frame is true in which case 2180 // we are in one frame deeper (the "inner" frame). If we are in the 2181 // "inner" frames the args are in the Iregs and if the jni frame then 2182 // they are in the Oregs. 2183 // If we ever need to go to the VM (for locking, jvmti) then 2184 // we will always be in the "inner" frame. 2185 2186 // Lock a synchronized method 2187 int lock_offset = -1; // Set if locked 2188 if (method->is_synchronized()) { 2189 Register Roop = O1; 2190 const Register L3_box = L3; 2191 2192 create_inner_frame(masm, &inner_frame_created); 2193 2194 __ ld_ptr(I1, 0, O1); 2195 Label done; 2196 2197 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size); 2198 __ add(FP, lock_offset+STACK_BIAS, L3_box); 2199 #ifdef ASSERT 2200 if (UseBiasedLocking) { 2201 // making the box point to itself will make it clear it went unused 2202 // but also be obviously invalid 2203 __ st_ptr(L3_box, L3_box, 0); 2204 } 2205 #endif // ASSERT 2206 // 2207 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch 2208 // 2209 __ compiler_lock_object(Roop, L1, L3_box, L2); 2210 __ br(Assembler::equal, false, Assembler::pt, done); 2211 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box); 2212 2213 2214 // None of the above fast optimizations worked so we have to get into the 2215 // slow case of monitor enter. Inline a special case of call_VM that 2216 // disallows any pending_exception. 2217 __ mov(Roop, O0); // Need oop in O0 2218 __ mov(L3_box, O1); 2219 2220 // Record last_Java_sp, in case the VM code releases the JVM lock. 
    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null(O0, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flush_windows();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native

  __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Use that pc we placed in O7 a while back as the current frame anchor

  __ set_last_Java_frame(SP, O7);

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));

  // We flushed the windows ages ago; now mark them as flushed.
  __ set(JavaFrameAnchor::flushed, G3_scratch);

  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));

#ifdef _LP64
  Address dest(O7, method->native_function());
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7);
#else
  __ call(method->native_function(), relocInfo::runtime_call_type);
#endif
  __ delayed()->st(G3_scratch, flags);

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
  case T_VOID:    break;        // Nothing to do!
  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1.
  case T_LONG:
#ifndef _LP64
    __ mov(O1, I1);
#endif
    // Fall thru
  case T_OBJECT:                // Really a handle
  case T_ARRAY:
  case T_INT:
    __ mov(O0, I0);
    break;                      // Cannot de-handlize until after reclaiming jvm_lock
  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  case T_BYTE   : __ sll(O0, 24, O0);   __ sra(O0, 24, I0); break;
  case T_CHAR   : __ sll(O0, 16, O0);   __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT  : __ sll(O0, 16, O0);   __ sra(O0, 16, I0); break;
  default:
    ShouldNotReachHere();
  }

  // must we block?
  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->
      ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.  Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);
    __ bind(no_block);
  }

  // Thread state is thread_in_native_trans.  Any safepoint blocking has
  // already happened, so we can now change state to _thread_in_Java.
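  // (Added summary of the transition sequence, not original text:
  //     _thread_in_Java
  //       -> _thread_in_native        (stored before the native call above)
  //       -> _thread_in_native_trans  (stored after the call returned)
  //       -> may block in check_special_condition_for_native_trans if a
  //          safepoint or a suspend request is pending
  //       -> _thread_in_Java          (stored just below))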
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));


  Label no_reguard;
  __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch);
  __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
  __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
  __ delayed()->nop();

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // Native result, if any, is live in a freg or in I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.  Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation.  Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM.  Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // The fast path failed, so fall into the slow case of monitor exit.
    // Inline a special case of call_VM that disallows any pending_exception.
    __ mov(L3_box, O1);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);              // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null(O0, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set.  The forward_exception routine expects to see the
    // exception in pending_exception and not in a register.  Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
2449 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset())); 2450 __ bind(done); 2451 } 2452 2453 // Tell dtrace about this method exit 2454 { 2455 SkipIfEqual skip_if( 2456 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2457 save_native_result(masm, ret_type, stack_slots); 2458 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2459 __ call_VM_leaf(L7_thread_cache, 2460 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2461 G2_thread, O1); 2462 restore_native_result(masm, ret_type, stack_slots); 2463 } 2464 2465 // Clear "last Java frame" SP and PC. 2466 __ verify_thread(); // G2_thread must be correct 2467 __ reset_last_Java_frame(); 2468 2469 // Unpack oop result 2470 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2471 Label L; 2472 __ addcc(G0, I0, G0); 2473 __ brx(Assembler::notZero, true, Assembler::pt, L); 2474 __ delayed()->ld_ptr(I0, 0, I0); 2475 __ mov(G0, I0); 2476 __ bind(L); 2477 __ verify_oop(I0); 2478 } 2479 2480 // reset handle block 2481 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5); 2482 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes()); 2483 2484 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch); 2485 check_forward_pending_exception(masm, G3_scratch); 2486 2487 2488 // Return 2489 2490 #ifndef _LP64 2491 if (ret_type == T_LONG) { 2492 2493 // Must leave proper result in O0,O1 and G1 (c2/tiered only) 2494 __ sllx(I0, 32, G1); // Shift bits into high G1 2495 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 2496 __ or3 (I1, G1, G1); // OR 64 bits into G1 2497 } 2498 #endif 2499 2500 __ ret(); 2501 __ delayed()->restore(); 2502 2503 __ flush(); 2504 2505 nmethod *nm = nmethod::new_native_nmethod(method, 2506 masm->code(), 2507 vep_offset, 2508 frame_complete, 2509 stack_slots / VMRegImpl::slots_per_word, 2510 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2511 in_ByteSize(lock_offset), 2512 oop_maps); 2513 return nm; 2514 2515 } 2516 2517 #ifdef HAVE_DTRACE_H 2518 // --------------------------------------------------------------------------- 2519 // Generate a dtrace nmethod for a given signature. The method takes arguments 2520 // in the Java compiled code convention, marshals them to the native 2521 // abi and then leaves nops at the position you would expect to call a native 2522 // function. When the probe is enabled the nops are replaced with a trap 2523 // instruction that dtrace inserts and the trace will cause a notification 2524 // to dtrace. 2525 // 2526 // The probes are only able to take primitive types and java/lang/String as 2527 // arguments. No other java types are allowed. Strings are converted to utf8 2528 // strings so that from dtrace point of view java strings are converted to C 2529 // strings. There is an arbitrary fixed limit on the total space that a method 2530 // can use for converting the strings. (256 chars per string in the signature). 2531 // So any java string larger then this is truncated. 
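// (Added illustration, not part of the original comment: a probe for a
//  method `void m(String s, Long v)` would see the C signature
//     s -> T_ADDRESS          (pointer to a bounded utf8 copy)
//     v -> T_LONG, T_VOID     (boxed java/lang/Long unboxed to a 2-slot long)
//  which is exactly what the SignatureStream loop below computes.)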
2532 2533 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 }; 2534 static bool offsets_initialized = false; 2535 2536 static VMRegPair reg64_to_VMRegPair(Register r) { 2537 VMRegPair ret; 2538 if (wordSize == 8) { 2539 ret.set2(r->as_VMReg()); 2540 } else { 2541 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg()); 2542 } 2543 return ret; 2544 } 2545 2546 2547 nmethod *SharedRuntime::generate_dtrace_nmethod( 2548 MacroAssembler *masm, methodHandle method) { 2549 2550 2551 // generate_dtrace_nmethod is guarded by a mutex so we are sure to 2552 // be single threaded in this method. 2553 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); 2554 2555 // Fill in the signature array, for the calling-convention call. 2556 int total_args_passed = method->size_of_parameters(); 2557 2558 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); 2559 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); 2560 2561 // The signature we are going to use for the trap that dtrace will see 2562 // java/lang/String is converted. We drop "this" and any other object 2563 // is converted to NULL. (A one-slot java/lang/Long object reference 2564 // is converted to a two-slot long, which is why we double the allocation). 2565 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); 2566 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); 2567 2568 int i=0; 2569 int total_strings = 0; 2570 int first_arg_to_pass = 0; 2571 int total_c_args = 0; 2572 2573 // Skip the receiver as dtrace doesn't want to see it 2574 if( !method->is_static() ) { 2575 in_sig_bt[i++] = T_OBJECT; 2576 first_arg_to_pass = 1; 2577 } 2578 2579 SignatureStream ss(method->signature()); 2580 for ( ; !ss.at_return_type(); ss.next()) { 2581 BasicType bt = ss.type(); 2582 in_sig_bt[i++] = bt; // Collect remaining bits of signature 2583 out_sig_bt[total_c_args++] = bt; 2584 if( bt == T_OBJECT) { 2585 symbolOop s = ss.as_symbol_or_null(); 2586 if (s == vmSymbols::java_lang_String()) { 2587 total_strings++; 2588 out_sig_bt[total_c_args-1] = T_ADDRESS; 2589 } else if (s == vmSymbols::java_lang_Boolean() || 2590 s == vmSymbols::java_lang_Byte()) { 2591 out_sig_bt[total_c_args-1] = T_BYTE; 2592 } else if (s == vmSymbols::java_lang_Character() || 2593 s == vmSymbols::java_lang_Short()) { 2594 out_sig_bt[total_c_args-1] = T_SHORT; 2595 } else if (s == vmSymbols::java_lang_Integer() || 2596 s == vmSymbols::java_lang_Float()) { 2597 out_sig_bt[total_c_args-1] = T_INT; 2598 } else if (s == vmSymbols::java_lang_Long() || 2599 s == vmSymbols::java_lang_Double()) { 2600 out_sig_bt[total_c_args-1] = T_LONG; 2601 out_sig_bt[total_c_args++] = T_VOID; 2602 } 2603 } else if ( bt == T_LONG || bt == T_DOUBLE ) { 2604 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2605 // We convert double to long 2606 out_sig_bt[total_c_args-1] = T_LONG; 2607 out_sig_bt[total_c_args++] = T_VOID; 2608 } else if ( bt == T_FLOAT) { 2609 // We convert float to int 2610 out_sig_bt[total_c_args-1] = T_INT; 2611 } 2612 } 2613 2614 assert(i==total_args_passed, "validly parsed signature"); 2615 2616 // Now get the compiled-Java layout as input arguments 2617 int comp_args_on_stack; 2618 comp_args_on_stack = SharedRuntime::java_calling_convention( 2619 in_sig_bt, in_regs, total_args_passed, false); 2620 2621 // We have received a description of where all the java arg are located 2622 // on entry to the wrapper. 
  // We need to convert these args to where a native (non-JNI) function would
  // expect them.  To figure out where they go we convert the java signature
  // to a C signature and remove T_VOID for any long/double we might have
  // received.


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the first six register arguments).  It's weird; see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Plus a temp for possible conversion of float/double/long register args

  int conversion_temp = stack_slots;
  stack_slots += 2;


  // Now space for the string(s) we must convert

  int string_locs = stack_slots;
  stack_slots += total_strings *
                   (max_dtrace_string_size / VMRegImpl::stack_slot_size);

  // Ok. The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | string[n]           |
  //      |---------------------| <- string_locs[n]
  //      | string[n-1]         |
  //      |---------------------| <- string_locs[n-1]
  //      |  ...                |
  //      |  ...                |
  //      |---------------------| <- string_locs[1]
  //      | string[0]           |
  //      |---------------------| <- string_locs[0]
  //      | temp                |
  //      |---------------------| <- conversion_temp
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing: make an ic check to see if we should even be here

  {
    Label L;
    const Register temp_reg = G3_scratch;
    Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
    __ cmp(temp_reg, G5_inline_cache_reg);
    __ brx(Assembler::equal, true, Assembler::pt, L);
    __ delayed()->nop();

    __ jump_to(ic_miss, 0);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;


  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant.  The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_non_entrant");

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  // Frame is now completed as far as size and linkage.
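  // (Added arithmetic sketch, assuming 4-byte stack slots and a
  //  max_dtrace_string_size of 256 bytes: two string arguments would add
  //  2 * (256 / 4) = 128 slots above the conversion temp, and the
  //  round_to(stack_slots, 4 * VMRegImpl::slots_per_word) above keeps the
  //  frame a multiple of four words.)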
2720 2721 int frame_complete = ((intptr_t)__ pc()) - start; 2722 2723 #ifdef ASSERT 2724 bool reg_destroyed[RegisterImpl::number_of_registers]; 2725 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2726 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2727 reg_destroyed[r] = false; 2728 } 2729 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2730 freg_destroyed[f] = false; 2731 } 2732 2733 #endif /* ASSERT */ 2734 2735 VMRegPair zero; 2736 const Register g0 = G0; // without this we get a compiler warning (why??) 2737 zero.set2(g0->as_VMReg()); 2738 2739 int c_arg, j_arg; 2740 2741 Register conversion_off = noreg; 2742 2743 for (j_arg = first_arg_to_pass, c_arg = 0 ; 2744 j_arg < total_args_passed ; j_arg++, c_arg++ ) { 2745 2746 VMRegPair src = in_regs[j_arg]; 2747 VMRegPair dst = out_regs[c_arg]; 2748 2749 #ifdef ASSERT 2750 if (src.first()->is_Register()) { 2751 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!"); 2752 } else if (src.first()->is_FloatRegister()) { 2753 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding( 2754 FloatRegisterImpl::S)], "ack!"); 2755 } 2756 if (dst.first()->is_Register()) { 2757 reg_destroyed[dst.first()->as_Register()->encoding()] = true; 2758 } else if (dst.first()->is_FloatRegister()) { 2759 freg_destroyed[dst.first()->as_FloatRegister()->encoding( 2760 FloatRegisterImpl::S)] = true; 2761 } 2762 #endif /* ASSERT */ 2763 2764 switch (in_sig_bt[j_arg]) { 2765 case T_ARRAY: 2766 case T_OBJECT: 2767 { 2768 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT || 2769 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { 2770 // need to unbox a one-slot value 2771 Register in_reg = L0; 2772 Register tmp = L2; 2773 if ( src.first()->is_reg() ) { 2774 in_reg = src.first()->as_Register(); 2775 } else { 2776 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS), 2777 "must be"); 2778 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg); 2779 } 2780 // If the final destination is an acceptable register 2781 if ( dst.first()->is_reg() ) { 2782 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) { 2783 tmp = dst.first()->as_Register(); 2784 } 2785 } 2786 2787 Label skipUnbox; 2788 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) { 2789 __ mov(G0, tmp->successor()); 2790 } 2791 __ br_null(in_reg, true, Assembler::pn, skipUnbox); 2792 __ delayed()->mov(G0, tmp); 2793 2794 BasicType bt = out_sig_bt[c_arg]; 2795 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); 2796 switch (bt) { 2797 case T_BYTE: 2798 __ ldub(in_reg, box_offset, tmp); break; 2799 case T_SHORT: 2800 __ lduh(in_reg, box_offset, tmp); break; 2801 case T_INT: 2802 __ ld(in_reg, box_offset, tmp); break; 2803 case T_LONG: 2804 __ ld_long(in_reg, box_offset, tmp); break; 2805 default: ShouldNotReachHere(); 2806 } 2807 2808 __ bind(skipUnbox); 2809 // If tmp wasn't final destination copy to final destination 2810 if (tmp == L2) { 2811 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2); 2812 if (out_sig_bt[c_arg] == T_LONG) { 2813 long_move(masm, tmp_as_VM, dst); 2814 } else { 2815 move32_64(masm, tmp_as_VM, out_regs[c_arg]); 2816 } 2817 } 2818 if (out_sig_bt[c_arg] == T_LONG) { 2819 assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); 2820 ++c_arg; // move over the T_VOID to keep the loop indices in sync 2821 } 2822 } else if (out_sig_bt[c_arg] == T_ADDRESS) { 2823 Register s = 2824 src.first()->is_reg() ? 
src.first()->as_Register() : L2; 2825 Register d = 2826 dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2827 2828 // We store the oop now so that the conversion pass can reach 2829 // while in the inner frame. This will be the only store if 2830 // the oop is NULL. 2831 if (s != L2) { 2832 // src is register 2833 if (d != L2) { 2834 // dst is register 2835 __ mov(s, d); 2836 } else { 2837 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2838 STACK_BIAS), "must be"); 2839 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS); 2840 } 2841 } else { 2842 // src not a register 2843 assert(Assembler::is_simm13(reg2offset(src.first()) + 2844 STACK_BIAS), "must be"); 2845 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d); 2846 if (d == L2) { 2847 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2848 STACK_BIAS), "must be"); 2849 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS); 2850 } 2851 } 2852 } else if (out_sig_bt[c_arg] != T_VOID) { 2853 // Convert the arg to NULL 2854 if (dst.first()->is_reg()) { 2855 __ mov(G0, dst.first()->as_Register()); 2856 } else { 2857 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2858 STACK_BIAS), "must be"); 2859 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS); 2860 } 2861 } 2862 } 2863 break; 2864 case T_VOID: 2865 break; 2866 2867 case T_FLOAT: 2868 if (src.first()->is_stack()) { 2869 // Stack to stack/reg is simple 2870 move32_64(masm, src, dst); 2871 } else { 2872 if (dst.first()->is_reg()) { 2873 // freg -> reg 2874 int off = 2875 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2876 Register d = dst.first()->as_Register(); 2877 if (Assembler::is_simm13(off)) { 2878 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2879 SP, off); 2880 __ ld(SP, off, d); 2881 } else { 2882 if (conversion_off == noreg) { 2883 __ set(off, L6); 2884 conversion_off = L6; 2885 } 2886 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2887 SP, conversion_off); 2888 __ ld(SP, conversion_off , d); 2889 } 2890 } else { 2891 // freg -> mem 2892 int off = STACK_BIAS + reg2offset(dst.first()); 2893 if (Assembler::is_simm13(off)) { 2894 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2895 SP, off); 2896 } else { 2897 if (conversion_off == noreg) { 2898 __ set(off, L6); 2899 conversion_off = L6; 2900 } 2901 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2902 SP, conversion_off); 2903 } 2904 } 2905 } 2906 break; 2907 2908 case T_DOUBLE: 2909 assert( j_arg + 1 < total_args_passed && 2910 in_sig_bt[j_arg + 1] == T_VOID && 2911 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2912 if (src.first()->is_stack()) { 2913 // Stack to stack/reg is simple 2914 long_move(masm, src, dst); 2915 } else { 2916 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2917 2918 // Destination could be an odd reg on 32bit in which case 2919 // we can't load direct to the destination. 
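          // (Added note, not original text: on 32-bit SPARC, ld_long/ldd
          //  needs an even/odd register pair starting at an even register,
          //  e.g. O0:O1, so if the nominal destination is an odd register
          //  the code below detours through L2 and lets long_move() split
          //  the value afterwards.)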
2920 2921 if (!d->is_even() && wordSize == 4) { 2922 d = L2; 2923 } 2924 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2925 if (Assembler::is_simm13(off)) { 2926 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2927 SP, off); 2928 __ ld_long(SP, off, d); 2929 } else { 2930 if (conversion_off == noreg) { 2931 __ set(off, L6); 2932 conversion_off = L6; 2933 } 2934 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2935 SP, conversion_off); 2936 __ ld_long(SP, conversion_off, d); 2937 } 2938 if (d == L2) { 2939 long_move(masm, reg64_to_VMRegPair(L2), dst); 2940 } 2941 } 2942 break; 2943 2944 case T_LONG : 2945 // 32bit can't do a split move of something like g1 -> O0, O1 2946 // so use a memory temp 2947 if (src.is_single_phys_reg() && wordSize == 4) { 2948 Register tmp = L2; 2949 if (dst.first()->is_reg() && 2950 (wordSize == 8 || dst.first()->as_Register()->is_even())) { 2951 tmp = dst.first()->as_Register(); 2952 } 2953 2954 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2955 if (Assembler::is_simm13(off)) { 2956 __ stx(src.first()->as_Register(), SP, off); 2957 __ ld_long(SP, off, tmp); 2958 } else { 2959 if (conversion_off == noreg) { 2960 __ set(off, L6); 2961 conversion_off = L6; 2962 } 2963 __ stx(src.first()->as_Register(), SP, conversion_off); 2964 __ ld_long(SP, conversion_off, tmp); 2965 } 2966 2967 if (tmp == L2) { 2968 long_move(masm, reg64_to_VMRegPair(L2), dst); 2969 } 2970 } else { 2971 long_move(masm, src, dst); 2972 } 2973 break; 2974 2975 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2976 2977 default: 2978 move32_64(masm, src, dst); 2979 } 2980 } 2981 2982 2983 // If we have any strings we must store any register based arg to the stack 2984 // This includes any still live xmm registers too. 2985 2986 if (total_strings > 0 ) { 2987 2988 // protect all the arg registers 2989 __ save_frame(0); 2990 __ mov(G2_thread, L7_thread_cache); 2991 const Register L2_string_off = L2; 2992 2993 // Get first string offset 2994 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off); 2995 2996 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) { 2997 if (out_sig_bt[c_arg] == T_ADDRESS) { 2998 2999 VMRegPair dst = out_regs[c_arg]; 3000 const Register d = dst.first()->is_reg() ? 3001 dst.first()->as_Register()->after_save() : noreg; 3002 3003 // It's a string the oop and it was already copied to the out arg 3004 // position 3005 if (d != noreg) { 3006 __ mov(d, O0); 3007 } else { 3008 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 3009 "must be"); 3010 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0); 3011 } 3012 Label skip; 3013 3014 __ br_null(O0, false, Assembler::pn, skip); 3015 __ delayed()->add(FP, L2_string_off, O1); 3016 3017 if (d != noreg) { 3018 __ mov(O1, d); 3019 } else { 3020 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 3021 "must be"); 3022 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS); 3023 } 3024 3025 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf), 3026 relocInfo::runtime_call_type); 3027 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off); 3028 3029 __ bind(skip); 3030 3031 } 3032 3033 } 3034 __ mov(L7_thread_cache, G2_thread); 3035 __ restore(); 3036 3037 } 3038 3039 3040 // Ok now we are done. 
Need to place the nop that dtrace wants in order to 3041 // patch in the trap 3042 3043 int patch_offset = ((intptr_t)__ pc()) - start; 3044 3045 __ nop(); 3046 3047 3048 // Return 3049 3050 __ ret(); 3051 __ delayed()->restore(); 3052 3053 __ flush(); 3054 3055 nmethod *nm = nmethod::new_dtrace_nmethod( 3056 method, masm->code(), vep_offset, patch_offset, frame_complete, 3057 stack_slots / VMRegImpl::slots_per_word); 3058 return nm; 3059 3060 } 3061 3062 #endif // HAVE_DTRACE_H 3063 3064 // this function returns the adjust size (in number of words) to a c2i adapter 3065 // activation for use during deoptimization 3066 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 3067 assert(callee_locals >= callee_parameters, 3068 "test and remove; got more parms than locals"); 3069 if (callee_locals < callee_parameters) 3070 return 0; // No adjustment for negative locals 3071 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords(); 3072 return round_to(diff, WordsPerLong); 3073 } 3074 3075 // "Top of Stack" slots that may be unused by the calling convention but must 3076 // otherwise be preserved. 3077 // On Intel these are not necessary and the value can be zero. 3078 // On Sparc this describes the words reserved for storing a register window 3079 // when an interrupt occurs. 3080 uint SharedRuntime::out_preserve_stack_slots() { 3081 return frame::register_save_words * VMRegImpl::slots_per_word; 3082 } 3083 3084 static void gen_new_frame(MacroAssembler* masm, bool deopt) { 3085 // 3086 // Common out the new frame generation for deopt and uncommon trap 3087 // 3088 Register G3pcs = G3_scratch; // Array of new pcs (input) 3089 Register Oreturn0 = O0; 3090 Register Oreturn1 = O1; 3091 Register O2UnrollBlock = O2; 3092 Register O3array = O3; // Array of frame sizes (input) 3093 Register O4array_size = O4; // number of frames (input) 3094 Register O7frame_size = O7; // number of frames (input) 3095 3096 __ ld_ptr(O3array, 0, O7frame_size); 3097 __ sub(G0, O7frame_size, O7frame_size); 3098 __ save(SP, O7frame_size, SP); 3099 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc 3100 3101 #ifdef ASSERT 3102 // make sure that the frames are aligned properly 3103 #ifndef _LP64 3104 __ btst(wordSize*2-1, SP); 3105 __ breakpoint_trap(Assembler::notZero); 3106 #endif 3107 #endif 3108 3109 // Deopt needs to pass some extra live values from frame to frame 3110 3111 if (deopt) { 3112 __ mov(Oreturn0->after_save(), Oreturn0); 3113 __ mov(Oreturn1->after_save(), Oreturn1); 3114 } 3115 3116 __ mov(O4array_size->after_save(), O4array_size); 3117 __ sub(O4array_size, 1, O4array_size); 3118 __ mov(O3array->after_save(), O3array); 3119 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock); 3120 __ add(G3pcs, wordSize, G3pcs); // point to next pc value 3121 3122 #ifdef ASSERT 3123 // trash registers to show a clear pattern in backtraces 3124 __ set(0xDEAD0000, I0); 3125 __ add(I0, 2, I1); 3126 __ add(I0, 4, I2); 3127 __ add(I0, 6, I3); 3128 __ add(I0, 8, I4); 3129 // Don't touch I5 could have valuable savedSP 3130 __ set(0xDEADBEEF, L0); 3131 __ mov(L0, L1); 3132 __ mov(L0, L2); 3133 __ mov(L0, L3); 3134 __ mov(L0, L4); 3135 __ mov(L0, L5); 3136 3137 // trash the return value as there is nothing to return yet 3138 __ set(0xDEAD0001, O7); 3139 #endif 3140 3141 __ mov(SP, O5_savedSP); 3142 } 3143 3144 3145 static void make_new_frames(MacroAssembler* masm, bool deopt) { 3146 // 3147 // loop through the UnrollBlock info and create new frames 3148 // 3149 Register G3pcs = 
// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register G3pcs         = G3_scratch; // Array of new pcs (input)
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;         // Array of frame sizes (input)
  Register O4array_size  = O4;         // number of frames (input)
  Register O7frame_size  = O7;         // size of the current frame (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);             // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero);
#endif
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);      // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5 as it could have the valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register G3pcs         = G3_scratch;
  Register Oreturn0      = O0;
  Register Oreturn1      = O1;
  Register O2UnrollBlock = O2;
  Register O3array       = O3;
  Register O4array_size  = O4;
  Label loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of stack
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(Address(O2UnrollBlock, 0,
        Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);

  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere.
  //

  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);   // allocate an interpreter frame

  __ tst(O4array_size);
  __ br(Assembler::notZero, false, Assembler::pn, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);      // load final frame new pc

}
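// Informational sketch only (not emitted code): the generated loop above is
// roughly the following pseudo-C over the UnrollBlock contents:
//
//   int n = unroll_block->number_of_frames();
//   do {
//     sp -= frame_sizes[i];     // the 'save' in gen_new_frame allocates it
//     new_frame->pc = frame_pcs[i];
//     i++; n--;
//   } while (n != 0);
//   O7 = frame_pcs[i];          // pc for the final return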
//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm            = new MacroAssembler(&buffer);
  FloatRegister   Freturn0        = F0;
  Register        Greturn1        = G1;
  Register        Oreturn0        = O0;
  Register        Oreturn1        = O1;
  Register        O2UnrollBlock   = O2;
  Register        O3tmp           = O3;
  Register        I5exception_tmp = I5;
  Register        G4exception_tmp = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, 0, -sizeof(double) - sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (taking care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places.

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, I5exception_tmp);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server). O0 contains the exception oop and O7 contains the original
  // exception pc. So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call, and
  // state will be extracted normally.
  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
    __ br_notnull(Oexception, false, Assembler::pt, has_exception);
    __ delayed()->nop();
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, Oexception);
    __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
    __ delayed()->nop();
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, I5exception_tmp);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, I5exception_tmp);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map(__ offset() - start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(I5exception_tmp, G4exception_tmp);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp(G4exception_tmp, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::notEqual, false, Assembler::pt, noException);
  __ delayed()->nop();

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0, Oreturn0->after_save());
  __ mov(Oreturn1, Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  if (!TieredCompilation) {
    // 32-bit 1-register longs return longs in G1
    __ stx(Greturn1, saved_Greturn1_addr);
  }
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4exception_tmp);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4exception_tmp, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  // In tiered we never use C2 to compile methods returning longs so
  // the result is where we expect it already.

#if !defined(_LP64) && defined(COMPILER2)
  // In 32-bit, C2 returns longs in G1, so restore the saved G1 into
  // I0/I1 if the return value is a long. In the tiered world there is
  // a mismatch between how C1-compiled and C2-compiled code return longs,
  // so compilation of methods which return longs is currently disabled for
  // C2, and so is this code. Eventually C1 and C2 will do the same thing
  // for longs in the tiered world.
  if (!TieredCompilation) {
    Label not_long;
    __ cmp(O0, T_LONG);
    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
    __ delayed()->nop();
    __ ldd(saved_Greturn1_addr, I0);
    __ bind(not_long);
  }
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
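// Informational summary (derived from the offsets computed above, no new
// behavior): the finished deopt blob has four entry points, all funneling
// into the common 'cont' path with a different Unpack_* mode in
// I5exception_tmp:
//
//   start + 0                       - normal deopt           (Unpack_deopt)
//   start + exception_offset        - exception in O0/O7, saved to TLS and
//                                     falls through to ...    (Unpack_exception)
//   start + exception_in_tls_offset - exception already in the thread's
//                                     exception_oop field     (Unpack_exception)
//   start + reexecute_offset        - reexecute the bytecode  (Unpack_reexecute)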
#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm          = new MacroAssembler(&buffer);
  Register        O2UnrollBlock = O2;
  Register        O3tmp         = O3;
  Register        O2klass_index = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3);   // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2
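// Design note (summary of the code above, no new behavior): both unroll blobs
// reuse make_new_frames(). generate_deopt_blob() passes deopt == true so the
// live return values in Oreturn0/Oreturn1 are threaded through every new
// frame, while generate_uncommon_trap_blob() passes deopt == false since an
// uncommon trap never has a pending return value to preserve.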
//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's into %i's,
// and an interrupt will chop off their heads. Making space in the caller's
// frame first will let us save the 64-bit %o's before save'ing, but we cannot
// hand the adjusted FP off to the GC stack-crawler: this will modify the
// caller's SP and mess up HIS OopMaps. So we first adjust the caller's SP,
// then save the 64-bit %o's, then do a save, then fix up the caller's SP
// (our FP). Tricky, tricky, tricky...

static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}
//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call target.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  // get the returned methodOop

  __ get_vm_result(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
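// Runtime control flow of a resolve stub (informational summary of the code
// above, no new behavior):
//   1. save_live_registers() preserves all the live argument registers
//   2. 'destination' (a SharedRuntime resolver) finds the callee and patches
//      the call site
//   3. the resolved methodOop and target address are written into the
//      register-save slots for G5 and G3, so restore_live_registers()
//      reloads them
//   4. the stub jumps to G3 with the original arguments intact, or
//      tail-calls forward_exception_entry if an exception is pending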
void SharedRuntime::generate_stubs() {

  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
                                             "wrong_method_stub");

  _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
                                        "ic_miss_stub");

  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
                                                         "resolve_opt_virtual_call");

  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
                                                     "resolve_virtual_call");

  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
                                                    "resolve_static_call");

  _polling_page_safepoint_handler_blob =
      generate_handler_blob(CAST_FROM_FN_PTR(address,
          SafepointSynchronize::handle_polling_page_exception), false);

  _polling_page_return_handler_blob =
      generate_handler_blob(CAST_FROM_FN_PTR(address,
          SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}