/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception, a compiled-code safepoint that was not originally a call,
  // or a deoptimization following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires that we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };
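
  // A hedged worked example of the layout arithmetic above (the real value
  // of call_args_area depends on the frame constants, so suppose for
  // illustration that it came out to 0x93):
  //
  //   start_of_extra_save_area = (0x93 + 7) & ~7 = 0x98  // next 8-byte boundary
  //   g1 = 0x98, g3 = 0xa0, g4 = 0xa8, g5 = 0xb0, o0 = 0xb8, ..., o5 = 0xe0
  //   ccr = 0xe8, fsr = 0xf0, d00 = 0xf8
  //   register_save_size = 0xf8 + 8*32 = 0x1f8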

public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
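
// A minimal usage sketch (hedged): this mirrors the save/call/restore shape
// that runtime stubs built around RegisterSaver typically follow, not any
// one exact call site; frame_size_words and the VM call are placeholders.
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set_last_Java_frame(), call the VM entry point, reset_last_Java_frame() ...
//   RegisterSaver::restore_live_registers(masm);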

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now.
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee-saved both halves of each double register (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}
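
// A note on the (offset + 4) >> 2 arithmetic above (editorial, hedged):
// OopMap slots are 4-byte VMRegImpl::stack_slot_size units, hence the >> 2 on
// byte offsets. On the big-endian 32-bit build a 64-bit stx puts the live
// 32-bit register value in the high-addressed half of the slot, so the named
// VMReg is recorded at offset+4 and its ->next() half at offset, matching the
// debug_offset of 4 used for the G registers.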


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers.
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's.
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS.

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags.

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // The 32-bit build returns longs in G1.
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than a size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d bytes vectors are not supported", size);
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers; those at or above refer to 4-byte stack slots. All stack
// slots are based off of the window top. VMRegImpl::stack0 refers to the
// first slot past the 16-word window, and VMRegImpl::stack0+1 refers to the
// memory word 4 bytes higher. Register values 0-63 (up to
// RegisterImpl::number_of_registers) are the 64-bit integer registers.
// Values 64-95 are the (32-bit only) float registers. Each 32-bit quantity
// is given its own number, so the integer registers (in either 32- or 64-bit
// builds) use 2 numbers. For example, there is an O0-low and an O0-high.
// Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). Otherwise regs[].second() is regs[].first()+1
// (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first()
// (regs[].first() == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad())
// nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
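
// For illustration (a hedged reading of the numbering above): a 32-bit int
// passed in O0 occupies just the "O0-low" VMReg; a 64-bit long in O0 uses
// the O0-low/O0-high pair; and the first memory word past the 16-word
// register window is VMRegImpl::stack0, the next one stack0+1, and so on.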


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (they cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC always put longs on the stack to keep the pressure
      // off the integer argument registers. They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();  // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
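
// A hedged worked example of the convention above, for the 64-bit build with
// an incoming (is_outgoing == 0) signature of (int, long, double):
//
//   sig_bt  = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }
//   regs[0].set1(I0)   // int: one 32-bit half of I0
//   regs[1].set2(I1)   // long: both halves of I1
//   regs[2].set_bad()  // placeholder half of the long
//   regs[3].set2(F0)   // double: the aligned F0/F1 pair
//   regs[4].set_bad()  // placeholder half of the double
//
// No stack slots are consumed, so the function would return 0.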

// Helper class, mostly to avoid passing masm everywhere, and to handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};
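
// A note on arg_slot()/next_arg_slot() above (editorial): SPARC load/store
// displacements are 13-bit signed immediates (simm13, -4096..4095), so for
// methods with enough arguments the computed offset can overflow the field.
// ensure_simm13_or_reg then materializes the offset into Rdisp first, e.g.
// (hedged sketch) an offset of 5000 becomes "set 5000, Rdisp" followed by a
// [base + Rdisp] access instead of an impossible [base + 5000] immediate.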


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee.
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  //   G1: 1st Long arg (32-bit build)
  //   G2: global allocated to TLS
  //   G3: used in inline cache check (scratch)
  //   G4: 2nd Long arg (32-bit build);
  //   G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // Can be very far once the blob has been relocated.
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     );  // lo bits
    __ stw(r             , base, next_arg_slot(st_off));  // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually, if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in the varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Set up Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}
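
// A hedged sizing example for the adapter above: with total_args_passed == 3
// and an 8-byte Interpreter::stackElementSize, arg_size would be 24 bytes;
// adding the interpreter's varargs area and rounding up to 2*wordSize gives
// the "extraspace" by which SP is dropped before the args are stored.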

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack, // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  //   G2_thread    - TLS
  //   G5_method    - Method oop
  //   G4 (Gargs)   - Pointer to interpreter's args
  //   O0..O4       - free for scratch
  //   O5_savedSP   - Caller's saved SP, to be restored if needed
  //   O6           - Current SP!
  //   O7           - Valid return address
  //   L0-L7, I0-I7 - Caller's temps (no frame pushed yet)

  // Outputs:
  //   G2_thread    - TLS
  //   O0-O5        - Outgoing args in compiled layout
  //   O6           - Adjusted or restored SP
  //   O7           - Valid return address
  //   L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
  //   F0-F7        - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  //   |              |
  //   :  java stack  :
  //   |              |
  //   +--------------+ <--- start of outgoing args
  //   | receiver     |   |
  //   : rest of args :   |---size is java-arg-words
  //   |              |   |
  //   +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  //   |              |   |
  //   :    unused    :   |---Space for max Java stack, plus stack alignment
  //   |              |   |
  //   +--------------+ <--- SP + 16*wordsize
  //   |              |
  //   :    window    :
  //   |              |
  //   +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (the 32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  //   |              |
  //   :  java stack  :
  //   |              |
  //   +--------------+ <--- start of outgoing args
  //   | pad, align   |   |
  //   +--------------+   |
  //   | ints, longs, |   |
  //   |    floats,   |   |---Outgoing stack args.
  //   :    doubles   :   |   First few args in registers.
  //   |              |   |
  //   +--------------+ <--- SP' + 16*wordsize
  //   |              |
  //   :    window    :
  //   |              |
  //   +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to the minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {   // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();  // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset.
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
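
  // A hedged example of the shuffle above: on a 32-bit build, a long headed
  // for compiled-code stack slots travels through F8/F9 -- two 32-bit ldf
  // loads from the misaligned Gargs slots, then one aligned 64-bit stf(D)
  // to the SP-relative offset computed from reg2offset().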

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // Check if this call should be routed towards a specific entry point.
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // The method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in.
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
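
// A hedged worked example of int_stk_helper: for i in 0..5 the result is
// simply O0..O5. For i == 6, mem_parm_offset is 0 and the returned slot is
// the first memory-parameter word, biased *down* by out_preserve_stack_slots()
// so that callers which add that bias back in end up at the real SP-relative
// location.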


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on sparc");

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                 // Count of actual args, not HALVES
  VMRegPair param_array_reg; // location of the argument in the parameter array
  for (int i = 0; i < total_args_passed; i++, j++) {
    param_array_reg.set_bad();
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
      // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
      //
      // "When a callee prototype exists, and does not indicate variable arguments,
      // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
      // will be promoted to floating-point registers"
      //
      // By "promoted" it means that the argument is located in two places, an unused
      // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
      // float register. In most cases, there are 6 or fewer arguments of any type,
      // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
      // serve as shadow slots. Per the spec floating point registers %d6 to %d16
      // require slots beyond that (up to %sp+BIAS+248).
      //
      {
        // V9ism: floats go in ODD registers and stack slots
        int float_index = 1 + (j << 1);
        param_array_reg.set1(VMRegImpl::stack2reg(float_index));
        if (j < 16) {
          regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
        } else {
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_DOUBLE:
      {
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // V9ism: doubles go in EVEN/ODD regs and stack slots
        int double_index = (j << 1);
        param_array_reg.set2(VMRegImpl::stack2reg(double_index));
        if (j < 16) {
          regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      j--;
      break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    // Keep track of the deepest parameter array slot.
    if (!param_array_reg.first()->is_valid()) {
      param_array_reg = regs[i];
    }
    if (param_array_reg.first()->is_stack()) {
      int off = param_array_reg.first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (param_array_reg.second()->is_stack()) {
      int off = param_array_reg.second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1(int_stk_helper(i));
      break;
    case T_DOUBLE:
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
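
// A hedged worked example of the V9 rules above, for a native signature of
// (int, float, double), i.e. sig_bt = { T_INT, T_FLOAT, T_DOUBLE, T_VOID }:
// j runs 0, 1, 2, so the int lands in O0 via int_stk_helper(0), the float in
// the odd register F3 (float_index = 1 + (1 << 1) = 3), and the double in
// the even/odd pair starting at F4 (double_index = 2 << 1 = 4); the float
// and double also each own a shadow slot in the parameter array.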


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception. The thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call,
// there is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have an exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of an integer-like type.
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    Register src_reg = src.first()->as_Register();
    // Some compilers (gcc) expect a clean 32-bit value on function entry.
    __ sra(src_reg, 0, L5);
    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    Register src_reg = src.first()->as_Register();
    // Some compilers (gcc) expect a clean 32-bit value on function entry.
    __ sra(src_reg, 0, L5);
    __ mov(L5, dst.first()->as_Register());
  }
}
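
// A hedged illustration of the sra above: sra(src_reg, 0, L5) sign-extends
// the low 32 bits into a 64-bit value, so a Java int of -1 reaches the
// native 64-bit slot as 0xFFFFFFFFFFFFFFFF rather than 0x00000000FFFFFFFF,
// which is the "clean 32-bit value" a gcc-compiled callee expects for a
// signed int parameter.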


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // Oop is already on the stack.
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack.
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}
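
// A hedged note on the conditional moves above: movr(rc_z, rOop, G0, rHandle)
// on 64-bit (and the tst/movcc pair on 32-bit) replaces the just-computed
// handle with NULL when the oop itself is NULL, preserving the JNI rule that
// a null reference is passed as a null handle rather than as a handle to a
// slot containing NULL.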
                src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  VMRegPair src_lo(src.first());
  VMRegPair src_hi(src.second());
  VMRegPair dst_lo(dst.first());
  VMRegPair dst_hi(dst.second());
  simple_move32(masm, src_lo, dst_lo);
  simple_move32(masm, src_hi, dst_hi);
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Do the simple ones here, else do two int moves
  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      __ mov(src.first()->as_Register(), dst.first()->as_Register());
    } else {
      // split src into two separate registers
      // Remember hi means hi address or lsw on sparc
      // Move msw to lsw
      if (dst.second()->is_reg()) {
        // MSW -> MSW
        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
        // Now LSW -> LSW
        // this will only move lo -> lo and ignore hi
        VMRegPair split(dst.second());
        simple_move32(masm, src, split);
      } else {
        VMRegPair split(src.first(), L4->as_VMReg());
        // MSW -> MSW (lo ie. first word)
        __ srax(src.first()->as_Register(), 32, L4);
        split_long_move(masm, split, dst);
      }
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is the low address, not the msb, for stack slots,
      // and lo is the "real" register for registers.
      // src.lo may be a register or a stack slot (see below).

      VMRegPair split;

      if (src.first()->is_reg()) {
        // src.lo (msw) is a reg, src.hi is stk/reg
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
        split.set_pair(dst.first(), src.first());
      } else {
        // msw is stack move to L5
        // lsw is stack move to dst.lo (real reg)
        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
        split.set_pair(dst.first(), L5->as_VMReg());
      }

      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
      // msw -> src.lo/L5, lsw -> dst.lo
      split_long_move(masm, src, split);

      // dst.lo now has the low-order word in place; shift the msw half into
      // position and OR the two together.
      __ sllx(split.first()->as_Register(), 32, L5);

      const Register d = dst.first()->as_Register();
      __ or3(L5, d, d);
    }
  } else {
    // For LP64 we can probably do better.
    split_long_move(masm, src, dst);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that like long_move a VMRegPair might be
  //    1: a single physical register
  //    2: two physical registers (v8)
  //    3: a physical reg [lo] and a stack slot [hi] (v8)
  //    4: two stack slots

  // Since src is always a java calling convention we know that the src pair
  // is always either all registers or all stack (and aligned?)
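  //
  // (A mixed pair, a register [lo] and a stack slot [hi], can show up on
  // the dst side.) The branches below are organized by where src and dst
  // live: stack -> stack, stack -> reg (possibly with a stack hi half),
  // reg -> stack, and reg -> reg, where gpr <-> fpr traffic goes through
  // a scratch stack slot since there is no direct move instruction.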

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack, the easiest of the bunch
      // There ought to be a way to use ldd/std here when the alignment is ok.
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.second()->is_stack()) {
        // stack -> reg, stack -> stack
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
        }
        // This was missing. (very rare case)
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        // stack -> reg
        // Eventually optimize for alignment QQQ
        if (dst.first()->is_Register()) {
          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
        } else {
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
        }
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      // Eventually optimize for alignment QQQ
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
      if (src.second()->is_stack()) {
        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      } else {
        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr to stack
      if (src.second()->is_stack()) {
        ShouldNotReachHere();
      } else {
        // Is the stack aligned?
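        // An 8-byte stf of kind FloatRegisterImpl::D requires an 8-byte
        // aligned address, so a destination slot that is only 4-byte
        // aligned must be written as two 4-byte singles instead.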
        if (reg2offset(dst.first()) & 0x7) {
          // No: store as a pair of singles
          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
        } else {
          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
        }
      }
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
        __ mov(src.second()->as_Register(), dst.second()->as_Register());
      } else {
        // gpr -> fpr
        // ought to be able to do a single store
        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
        // ought to be able to do a single load
        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      // ought to be able to do a single store
      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
      // ought to be able to do a single load
      // REMEMBER first() is low address not LSB
      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
      if (dst.second()->is_Register()) {
        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
      } else {
        __ ld(FP, -4 + STACK_BIAS, L4);
        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
      }
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}

// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  if (!*already_created) {
    __ save_frame(0);
    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
    // Don't use save_thread because it smashes G2 and we merely want to save a
    // copy
    __ mov(G2_thread, L7_thread_cache);
    *already_created = true;
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
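  // Globals and doubles occupy two adjacent slots each, floats one; the
  // handle_index walk below must be taken in the same order by the save
  // pass and the restore pass so they agree on the offsets.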
  if (map != NULL) {
    // Fill in the map
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        if (in_regs[i].first()->is_stack()) {
          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
        } else if (in_regs[i].first()->is_Register()) {
          map->set_oop(in_regs[i].first());
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  // Save or restore double word values
  int handle_index = 0;
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
      const Register reg = in_regs[i].first()->as_Register();
      if (reg->is_global()) {
        handle_index += 2;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ stx(reg, SP, offset + STACK_BIAS);
        } else {
          __ ldx(SP, offset + STACK_BIAS, reg);
        }
      }
    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
  // Save floats
  for (int i = 0; i < total_in_args; i++) {
    int slot = handle_index + arg_save_area;
    int offset = slot * VMRegImpl::stack_slot_size;
    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
      handle_index++;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
      } else {
        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
      }
    }
  }
}


// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
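//
// What the generated code does is roughly this C sketch (illustration
// only, using the runtime entry points called below):
//
//   if (GCLocker::needs_gc()) {
//     <save live argument registers, recording oops in the OopMap>
//     SharedRuntime::block_for_jni_critical(thread);
//     <reload the argument registers>
//   }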
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  AddressLiteral sync_state(GCLocker::needs_gc_address());
  __ load_bool_contents(sync_state, G3_scratch);
  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
  __ delayed()->nop();

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mov(G2_thread, L7_thread_cache);

  __ set_last_Java_frame(SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);
  oop_maps->add_gc_map( __ offset(), map);

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reset_last_Java_frame();

  // Reload all the register arguments
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        if (reg->is_global()) {
          __ mov(G0, reg);
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
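// Conceptually (illustration only):
//
//   if (array != NULL) {
//     body   = (char*)array + arrayOopDesc::base_offset_in_bytes(elem_type);
//     length = array->length();
//   } else {
//     body   = NULL;
//     length = 0;
//   }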
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  // Pass the length, ptr pair
  Label is_null, done;
  if (reg.first()->is_stack()) {
    VMRegPair tmp = reg64_to_VMRegPair(L2);
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ cmp(reg.first()->as_Register(), G0);
  __ brx(Assembler::equal, false, Assembler::pt, is_null);
  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
  __ ba_short(done);
  __ bind(is_null);
  // Pass zeros
  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
  __ bind(done);
}

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = G5_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
          __ ld_ptr(SP, ld_off, temp_reg);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = G5_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, member_reg);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
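    // On SPARC the compiled calling convention passes the first arguments
    // in registers, so the stack case below should be unreachable; it is
    // guarded by the fatal() call.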
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = G3_scratch;  // known to be free at this point
      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
      __ ld_ptr(SP, ld_off, receiver_reg);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here
  {
    Label L;
    const Register temp_reg = G3_scratch;
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    __ verify_oop(O0);
    __ load_klass(O0, temp_reg);
    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);

    __ jump_to(ic_miss, temp_reg);
    __ delayed()->nop();
    __ align(CodeEntryAlignment);
    __ bind(L);
  }

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
    // Object.hashCode, System.identityHashCode can pull the hashCode from the
    // header word instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do this
    // optimization at the call site without a lot of work.
    Label slowCase;
    Label done;
    Register obj_reg = O0;
    Register result  = O0;
    Register header  = G3_scratch;
    Register hash    = G3_scratch;  // overwrite header value with hash value
    Register mask    = G1;          // to get hash field from header

    // Unlike for Object.hashCode, System.identityHashCode is a static method
    // and gets the object as an argument instead of the receiver.
    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
      assert(method->is_static(), "method should be static");
      // return 0 for null reference input
      __ br_null(obj_reg, false, Assembler::pn, done);
      __ delayed()->mov(obj_reg, hash);
    }

    // Read the header and build a mask to get its hash field.
    // Give up if the object is not unlocked.
    // We depend on hash_mask being at most 32 bits and avoid the use of
    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
    // vm: see markOop.hpp.
    __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
    __ sethi(markOopDesc::hash_mask, mask);
    __ btst(markOopDesc::unlocked_value, header);
    __ br(Assembler::zero, false, Assembler::pn, slowCase);
    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ delayed()->nop();
      __ btst(markOopDesc::biased_lock_bit_in_place, header);
      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
    }
    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);

    // Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
    __ srlx(header, markOopDesc::hash_shift, hash);
#else
    __ srl(header, markOopDesc::hash_shift, hash);
#endif
    __ andcc(hash, mask, hash);
    __ br(Assembler::equal, false, Assembler::pn, slowCase);
    __ delayed()->nop();

    // leaf return.
    __ bind(done);
    __ retl();
    __ delayed()->mov(hash, result);
    __ bind(slowCase);
  }
#endif // COMPILER1


  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  int total_save_slots = 6 * VMRegImpl::slots_per_word;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // These have to be saved and restored across the safepoint
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE;    break;
            case 'C': in_elem_bt[i] = T_CHAR;    break;
            case 'D': in_elem_bt[i] = T_DOUBLE;  break;
            case 'F': in_elem_bt[i] = T_FLOAT;   break;
            case 'I': in_elem_bt[i] = T_INT;     break;
            case 'J': in_elem_bt[i] = T_LONG;    break;
            case 'S': in_elem_bt[i] = T_SHORT;   break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but providing space
  // for storing the 1st six register arguments). It's weird, see
  // int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT: assert(reg->is_in(), "don't need to save these"); break;
          case T_LONG: if (reg->is_global()) double_slots++; break;
          default: ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default: ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
  }

  // Compute framesize for the wrapper. We need to handlize all oops in
  // registers. We must create space for them here that is disjoint from
  // the windowed save area because we have no control over when we might
  // flush the window again and overwrite values that gc has since modified.
  // (The live window race)
  //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number, not
  // the outbound number.)
  // We must shuffle args to match the native convention, and include var-args space.

  // Calculate the total number of stack slots we will need.
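  //
  // The running total below is accumulated in this order: ABI out-preserve
  // area plus outgoing args, then the oop handle save area, then an optional
  // klass slot (static methods), an optional lock slot (synchronized
  // methods), and finally two scratch slots; see the frame diagram further
  // down.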

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = round_to(stack_slots, 2);
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place to save return value or as a temporary for any gpr -> fpr moves
  stack_slots += 2;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      | vararg area         |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute the actual number of stack words we need, rounding to make
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // Generate stack overflow check before creating frame
  __ generate_stack_overflow_check(stack_size);

  // Generate a new frame for the wrapper.
  __ save(SP, -stack_size, SP);

  int frame_complete = ((intptr_t)__ pc()) - start;

  __ verify_thread();

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  // the class mirror instead of a receiver. This pretty much guarantees that
  // register layout will not match. We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.
  // Because we have a new window and the argument registers are completely
  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  // here.

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame.
  // Since we are sure to have more args than the caller, doubling is
  // enough to make sure we can capture all the incoming oop args from
  // the caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  // Record sp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // We move the arguments backward because the floating point registers
  // destination will always be to a register with a greater or equal register
  // number or the stack.

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
    }
#endif /* ASSERT */

    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
          c_arg--;
          break;
        }
        // fall through for the non-critical case
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
        // fall through

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into O1.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);

    // Now handlize the static class mirror in O1.  It's known not-null.
    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ add(SP, klass_offset + STACK_BIAS, O1);
  }


  const Register L6_handle = L6;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    __ mov(O1, L6_handle);
  }

  // We have all of the arguments setup at this point. We MUST NOT touch any
  // Oregs except O6/O7.
  // So if we must call out we must push a new frame. We immediately
  // push a new frame and flush the windows.
#ifdef _LP64
  intptr_t thepc = (intptr_t) __ pc();
  {
    address here = __ pc();
    // Call the next instruction
    __ call(here + 8, relocInfo::none);
    __ delayed()->nop();
  }
#else
  intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */

  // We use the same pc/oopMap repeatedly when we call out
  oop_maps->add_gc_map(thepc - start, map);

  // O7 now has the pc loaded that we will use when we finally call to native.

  // Save thread in L7; it crosses a bunch of VM calls below
  // Don't use save_thread because it smashes G2 and we merely
  // want to save a copy
  __ mov(G2_thread, L7_thread_cache);


  // If we create an inner frame once is plenty
  // when we create it we must also save G2_thread
  bool inner_frame_created = false;

  // dtrace method entry support
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // create inner frame
    __ save_frame(0);
    __ mov(G2_thread, L7_thread_cache);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         G2_thread, O1);
    __ restore();
  }

  // We are in the jni frame unless saved_frame is true in which case
  // we are in one frame deeper (the "inner" frame). If we are in the
  // "inner" frame the args are in the Iregs and if the jni frame then
  // they are in the Oregs.
  // If we ever need to go to the VM (for locking, jvmti) then
  // we will always be in the "inner" frame.

  // Lock a synchronized method
  int lock_offset = -1;  // Set if locked
  if (method->is_synchronized()) {
    Register Roop = O1;
    const Register L3_box = L3;

    create_inner_frame(masm, &inner_frame_created);

    __ ld_ptr(I1, 0, O1);
    Label done;

    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
    __ add(FP, lock_offset+STACK_BIAS, L3_box);
#ifdef ASSERT
    if (UseBiasedLocking) {
      // making the box point to itself will make it clear it went unused
      // but also be obviously invalid
      __ st_ptr(L3_box, L3_box, 0);
    }
#endif // ASSERT
    //
    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
    //
    __ compiler_lock_object(Roop, L1, L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);


    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(Roop, O0);            // Need oop in O0
    __ mov(L3_box, O1);

    // Record last_Java_sp, in case the VM code releases the JVM lock.
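    // Setting the anchor here lets the VM walk this thread's stack if the
    // runtime call below blocks (e.g. for monitor inflation or a safepoint).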

    __ set_last_Java_frame(FP, I7);

    // do the call
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L7_thread_cache, O2);

    __ restore_thread(L7_thread_cache); // restore G2_thread
    __ reset_last_Java_frame();

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorenter");
      __ bind(L);
    }
#endif
    __ bind(done);
  }


  // Finally just about ready to make the JNI call

  __ flushw();
  if (inner_frame_created) {
    __ restore();
  } else {
    // Store only what we need from this frame
    // QQQ I think that non-v9 (like we care) we don't need these saves
    // either as the flush traps and the current window goes too.
    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  }

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  }

  // Use that pc we placed in O7 a while back as the current frame anchor
  __ set_last_Java_frame(SP, O7);

  // We flushed the windows ages ago now mark them as flushed before transitioning.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

  // Transition from _thread_in_Java to _thread_in_native.
  __ set(_thread_in_native, G3_scratch);

#ifdef _LP64
  AddressLiteral dest(native_func);
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
#else
  __ call(native_func, relocInfo::runtime_call_type);
#endif
  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

  __ restore_thread(L7_thread_cache); // restore G2_thread

  // Unpack native results.  For int-types, we do any needed sign-extension
  // and move things into I0.  The return value there will survive any VM
  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  // specially in the slow-path code.
  switch (ret_type) {
    case T_VOID:    break;        // Nothing to do!
    case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
    case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
    // In a 64-bit build the result is in O0; in a 32-bit build it is in O0, O1
    case T_LONG:
#ifndef _LP64
      __ mov(O1, I1);
#endif
      // Fall thru
    case T_OBJECT:                // Really a handle; cannot de-handlize
    case T_ARRAY:                 // until after reclaiming the jvm_lock
    case T_INT:
      __ mov(O0, I0);
      break;
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
    default:
      ShouldNotReachHere();
  }

  Label after_transition;
  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(suspend_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc
    //
    save_native_result(masm, ret_type, stack_slots);
    if (!is_critical_native) {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                      G2_thread);
    } else {
      __ call_VM_leaf(L7_thread_cache,
                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
                      G2_thread);
    }

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ ba(after_transition);
      __ delayed()->nop();
    }

    __ bind(no_block);
  }

  // thread state is thread_in_native_trans. Any safepoint blocking has already
  // happened so we can now change state to _thread_in_Java.
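  //
  // To summarize, the thread-state transitions generated around the native
  // call are:
  //   _thread_in_Java         -> _thread_in_native        (before the call)
  //   _thread_in_native       -> _thread_in_native_trans  (before the safepoint check)
  //   _thread_in_native_trans -> _thread_in_Java          (here, once any blocking is done)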
  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  __ bind(after_transition);

  Label no_reguard;
  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);

  __ bind(no_reguard);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)

  // Unlock
  if (method->is_synchronized()) {
    Label done;
    Register I2_ex_oop = I2;
    const Register L3_box = L3;
    // Get locked oop from the handle we passed to jni
    __ ld_ptr(L6_handle, 0, L4);
    __ add(SP, lock_offset+STACK_BIAS, L3_box);
    // Must save pending exception around the slow-path VM call.  Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
    // Now unlock
    //                       (Roop, Rmark, Rbox,   Rscratch)
    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
    __ br(Assembler::equal, false, Assembler::pt, done);
    __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);

    // save and restore any potential method result value around the unlocking
    // operation.  Will save in I0 (or stack for FP returns).
    save_native_result(masm, ret_type, stack_slots);

    // Must clear pending-exception before re-entering the VM.  Since this is
    // a leaf call, pending-exception-oop can be safely kept in a register.
    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));

    // slow case of monitor exit.  Inline a special case of call_VM that
    // disallows any pending_exception.
    __ mov(L3_box, O1);

    // Pass in current thread pointer
    __ mov(G2_thread, O2);

    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
    __ delayed()->mov(L4, O0);    // Need oop in O0

    __ restore_thread(L7_thread_cache); // restore G2_thread

#ifdef ASSERT
    { Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
      __ br_null_short(O0, Assembler::pt, L);
      __ stop("no pending exception allowed on exit from IR::monitorexit");
      __ bind(L);
    }
#endif
    restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set.  The forward_exception routine expects to see the
    // exception in pending_exception and not in a register.  Kind of clumsy,
    // since all folks who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
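    // Restore the pending exception (if any) that was saved in I2_ex_oop
    // across the unlock call.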
    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
    __ bind(done);
  }

  // Tell dtrace about this method exit
  {
    SkipIfEqual skip_if(
      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    save_native_result(masm, ret_type, stack_slots);
    __ set_metadata_constant(method(), O1);
    __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // Clear "last Java frame" SP and PC.
  __ verify_thread(); // G2_thread must be correct
  __ reset_last_Java_frame();

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label L;
    __ addcc(G0, I0, G0);
    __ brx(Assembler::notZero, true, Assembler::pt, L);
    __ delayed()->ld_ptr(I0, 0, I0);
    __ mov(G0, I0);
    __ bind(L);
    __ verify_oop(I0);
  }

  if (!is_critical_native) {
    // reset handle block
    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());

    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
    check_forward_pending_exception(masm, G3_scratch);
  }


  // Return

#ifndef _LP64
  if (ret_type == T_LONG) {

    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
    __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);          // OR 64 bits into G1
  }
#endif

  __ ret();
  __ delayed()->restore();

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }
  return nm;

}

// this function returns the adjusted size (in number of words) of a c2i
// adapter activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                     // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return round_to(diff, WordsPerLong);
}

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
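// For SPARC that is the register window save area, frame::register_save_words
// (the 16 words needed to spill %i0-%i7 and %l0-%l7), expressed in VMReg
// stack slots.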
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
  Register        G3pcs         = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0      = O0;
  Register        Oreturn1      = O1;
  Register        O2UnrollBlock = O2;
  Register        O3array       = O3;         // Array of frame sizes (input)
  Register        O4array_size  = O4;         // number of frames (input)
  Register        O7frame_size  = O7;         // size of the current frame (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                    // load frame's new pc

#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
#endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);             // point to next pc value

#ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5 as it could have a valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
#endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs         = G3_scratch;
  Register        Oreturn0      = O0;
  Register        Oreturn1      = O1;
  Register        O2UnrollBlock = O2;
  Register        O3array       = O3;
  Register        O4array_size  = O4;
  Label           loop;

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }
#endif

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment.
  // Even though in real life
  // every interpreter frame captures a savedSP, it is only needed at the
  // transition (fortunately). If we had to have it correct everywhere then
  // we would need to be told the sp_adjustment for each frame we create.
  // If the frame size array were to have twice the frame count entries then
  // we could have pairs [sp_adjustment, frame_size] for each frame we create
  // and keep up the illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);   // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);      // load final frame new pc

}

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;  // Extra slop space for more verify code
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 1000; // Increase the buffer size when compiling for JVMCI
  }
#endif
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm           = new MacroAssembler(&buffer);
  FloatRegister   Freturn0       = F0;
  Register        Greturn1       = G1;
  Register        Oreturn0       = O0;
  Register        Oreturn1       = O1;
  Register        O2UnrollBlock  = O2;
  Register        L0deopt_mode   = L0;
  Register        G4deopt_mode   = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places.

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);


#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    masm->block_comment("BEGIN implicit_exception_uncommon_trap");
    implicit_exception_uncommon_trap_offset = __ offset() - start;

    // Pick up the pc of the implicit exception from TLS and clear the field.
    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    // Bias the pc so it looks like a call return address (SPARC returns to
    // %o7 + 8); the bias is undone after the call below.
    __ add(O7, -8, O7);

    uncommon_trap_offset = __ offset() - start;

    // Save everything in sight.
    (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
    __ set_last_Java_frame(SP, NULL);

    // Fetch the pending deoptimization request and reset the field to -1.
    __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
    __ sub(G0, 1, L1);
    __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));

    __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
    __ mov(G2_thread, O0);
    __ mov(L0deopt_mode, O2);
    __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
    __ delayed()->nop();
    oop_maps->add_gc_map(__ offset() - start, map->deep_copy());
    __ get_thread();
    __ add(O7, 8, O7);
    __ reset_last_Java_frame();

    __ ba(after_fetch_unroll_info_call);
    __ delayed()->nop(); // Delay slot
    masm->block_comment("END implicit_exception_uncommon_trap");
  } // EnableJVMCI
#endif // INCLUDE_JVMCI
  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server). O0 contains the exception oop and O7 contains the original
  // exception pc. So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  // Opens a new stack frame
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif
  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ mov(L0deopt_mode, O1);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map(__ offset() - start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif
  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);
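  // For reference (paraphrasing Deoptimization::UnpackType in
  // deoptimization.hpp; see that header for the authoritative list), the
  // unpack kind reloaded below distinguishes:
  //   Unpack_deopt         - normal deoptimization
  //   Unpack_exception     - an exception is pending
  //   Unpack_uncommon_trap - redo the last bytecode
  //   Unpack_reexecute     - reexecute the bytecode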
  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  // 32-bit 1-register longs return longs in G1
  __ stx(Greturn1, saved_Greturn1_addr);
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
  Label not_long;
  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  __ ldd(saved_Greturn1_addr, I0);
  __ bind(not_long);
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}
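// The offsets recorded above surface through DeoptimizationBlob accessors
// (unpack(), unpack_with_exception(), unpack_with_reexecution(),
// unpack_with_exception_in_tls()); callers jump to those addresses rather
// than computing offsets themselves. (Author's note for orientation; see
// codeBlob.hpp for the accessor definitions.)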
#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

#ifdef ASSERT
  { Label L;
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2
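// Compiled code reaches the blob above through the blob returned by
// SharedRuntime::uncommon_trap_blob(), the address C2 emits for its
// uncommon-trap call sites; the trap request index travels in O0 as noted
// above. (Author's note for orientation; see sharedRuntime.hpp.)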
//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up ITS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}
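// For orientation (a sketch, not verbatim): SharedRuntime::generate_stubs()
// creates two instances of the blob above, one per poll type, roughly as
//
//   _polling_page_safepoint_handler_blob =
//       generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception),
//                             POLL_AT_LOOP_ONLY);
//   _polling_page_return_handler_blob =
//       generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception),
//                             POLL_AT_RETURN);
//
// so call_ptr above is the safepoint polling-page exception handler.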
//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call destination.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump, so overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // CodeBlob frame sizes are in words, so frame_size_words is the right unit here
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
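// For orientation (a sketch, not verbatim): the resolve blobs generated above
// are created in SharedRuntime::generate_stubs(), roughly as
//
//   _wrong_method_blob             = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
//   _ic_miss_blob                  = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
//   _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
//   _resolve_virtual_call_blob     = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
//   _resolve_static_call_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
//
// i.e. 'destination' is one of the SharedRuntime call-resolution entry points.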