/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a call,
  // or we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // can't use round_to because it doesn't produce a compile time constant
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result register needs to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);
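
  // Note: 'save' both allocates this frame and rotates the register window,
  // so the caller's %o registers become this frame's %i registers. On the
  // 32-bit build that rotation would truncate live 64-bit O values, which is
  // why they were parked in thread-local storage above before the save.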

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }
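
  // Note (illustrative): OopMap locations are recorded in 4-byte stack
  // slots, hence the 'offset >> 2' conversions above. For the 64-bit saves
  // on the 32-bit big-endian build, the named 32-bit half of each G/O
  // register sits at byte offset + 4 within its 8-byte save slot, which is
  // why those entries use (offset + 4) >> 2.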

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}
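
// Note: 'restore' pops the register window, so on the 32-bit build the
// 64-bit O values must be staged through TLS across it (stored before the
// restore, reloaded after); a plain restore would clip them back to the
// 32-bit halves preserved by the window.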

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d bytes vectors are not supported", size);
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the window
// top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers. Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers. For example, there is
// an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refer to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.

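// Illustrative example of the numbering scheme described above: %o0 gets two
// consecutive VMReg numbers (O0-low and O0-high), so a 64-bit value in %o0
// spans both while a 32-bit int in %o0 names only the first of the pair.
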
// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC put longs always on the stack to keep the pressure off
      // integer argument registers. They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();  // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
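
// Illustrative result (64-bit build, outgoing args), following the rules
// above: for the Java signature (int, long, double) the int lands in %o0,
// the long in %o1 (a single set2 entry), and the double in the aligned
// float pair F0:F1, with no stack slots consumed.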

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs, the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build)
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}
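
// Note: SPARC load/store displacements are 13-bit signed immediates
// (simm13, -4096..4095). arg_slot()/next_arg_slot() below wrap offsets in
// ensure_simm13_or_reg so that an offset too large for simm13 is first
// materialized into the Rdisp scratch register.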

RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     );  // lo bits
    __ stw(r             , base, next_arg_slot(st_off));  // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr(r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st(r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}
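
// Note: the __ delayed()->... instructions above execute in the delay slot
// of the preceding branch/jmpl, i.e. after the jump issues but before
// control reaches the target; that is how Gargs gets set up 'after' the
// jmpl yet is visible on arrival at the interpreter entry.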

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack, // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK. We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention. We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle. We hope for (and optimize for) the case where
  // temps are not needed. We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
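
  // Illustrative arithmetic for the resize below, assuming the 64-bit build
  // (wordSize == 8, 4-byte stack slots): comp_args_on_stack == 3 slots is
  // 12 bytes, rounded up to 2 words, and round_to(2, 2) == 2, so SP is
  // dropped by 16 bytes.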

  // Cut-out for having no stack args. Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP. This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know G5 holds the Method*. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).
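
  // The unverified entry emitted below is the inline-cache check: the
  // receiver's klass (loaded into G3_scratch) is compared against the
  // expected holder klass from the CompiledICHolder in G5; on mismatch we
  // branch to the ic-miss stub so the call site can be re-resolved.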

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
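
// Illustrative behavior of int_stk_helper above: i in 0..5 returns the
// outgoing registers %o0..%o5; i >= 6 returns a stack slot in the caller's
// memory-parameter area, pre-biased by subtracting out_preserve_stack_slots
// (callers add that bias back in when computing real offsets).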

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on sparc");

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64
  // The LP64 convention is the V9 convention which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  VMRegPair param_array_reg;  // location of the argument in the parameter array
  for (int i = 0; i < total_args_passed; i++, j++) {
    param_array_reg.set_bad();
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1(int_stk_helper(j));
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
    case T_METADATA:
      regs[i].set2(int_stk_helper(j));
      break;
    case T_FLOAT:
      // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
      // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
      //
      // "When a callee prototype exists, and does not indicate variable arguments,
      // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
      // will be promoted to floating-point registers"
      //
      // By "promoted" it means that the argument is located in two places, an unused
      // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
      // float register. In most cases, there are 6 or fewer arguments of any type,
      // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
      // serve as shadow slots. Per the spec floating point registers %d6 to %d16
      // require slots beyond that (up to %sp+BIAS+248).
      //
      {
        // V9ism: floats go in ODD registers and stack slots
        int float_index = 1 + (j << 1);
        param_array_reg.set1(VMRegImpl::stack2reg(float_index));
        if (j < 16) {
          regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
        } else {
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_DOUBLE:
      {
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // V9ism: doubles go in EVEN/ODD regs and stack slots
        int double_index = (j << 1);
        param_array_reg.set2(VMRegImpl::stack2reg(double_index));
        if (j < 16) {
          regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
        } else {
          // V9ism: doubles go in EVEN/ODD stack slots
          regs[i] = param_array_reg;
        }
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      j--;
      break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    // Keep track of the deepest parameter array slot.
    if (!param_array_reg.first()->is_valid()) {
      param_array_reg = regs[i];
    }
    if (param_array_reg.first()->is_stack()) {
      int off = param_array_reg.first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (param_array_reg.second()->is_stack()) {
      int off = param_array_reg.second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_METADATA:
    case T_SHORT:
      regs[i].set1(int_stk_helper(i));
      break;
    case T_DOUBLE:
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}


// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
// is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}
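
// The helpers below each shuffle one native argument between the Java
// locations and the C locations computed by c_calling_convention:
// simple_move32 for plain 32-bit values, move32_64 for ints widened to
// 64-bit ABI slots (hence the signx sign-extensions), move_ptr for raw
// pointers, and object_move further down for oops, which must be handleized.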

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32 bit it will simply be 32 bits,
// so this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), L5);
    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}
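
// Note: a JNI handle here is simply the address of a stack word holding the
// oop; JNI requires that a NULL oop be passed as a NULL handle, which is
// what the movr/movcc conditional moves in object_move below arrange.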

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
    __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
    __ tst( L4 );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
      *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
    __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
    __ tst( rOop );
    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}
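
// Note: for gpr<->fpr transfers, float_move below bounces the value through
// a scratch stack word at FP-4 rather than using a direct
// register-to-register transfer.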
1484 }
1485 }
1486 }
1487 }
1488 
1489 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1490 VMRegPair src_lo(src.first());
1491 VMRegPair src_hi(src.second());
1492 VMRegPair dst_lo(dst.first());
1493 VMRegPair dst_hi(dst.second());
1494 simple_move32(masm, src_lo, dst_lo);
1495 simple_move32(masm, src_hi, dst_hi);
1496 }
1497 
1498 // A long move
1499 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1500 
1501 // Do the simple ones here, else do two int moves
1502 if (src.is_single_phys_reg() ) {
1503 if (dst.is_single_phys_reg()) {
1504 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1505 } else {
1506 // split src into two separate registers
1507 // Remember hi means hi address or lsw on sparc
1508 // Move msw to lsw
1509 if (dst.second()->is_reg()) {
1510 // MSW -> MSW
1511 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1512 // Now LSW -> LSW
1513 // this will only move lo -> lo and ignore hi
1514 VMRegPair split(dst.second());
1515 simple_move32(masm, src, split);
1516 } else {
1517 VMRegPair split(src.first(), L4->as_VMReg());
1518 // MSW -> MSW (lo ie. first word)
1519 __ srax(src.first()->as_Register(), 32, L4);
1520 split_long_move(masm, split, dst);
1521 }
1522 }
1523 } else if (dst.is_single_phys_reg()) {
1524 if (src.is_adjacent_aligned_on_stack(2)) {
1525 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1526 } else {
1527 // dst is a single reg.
1528 // Remember lo is low address not msb for stack slots
1529 // and lo is the "real" register for registers
1530 // src is split across two locations (registers and/or stack slots)
1531 
1532 VMRegPair split;
1533 
1534 if (src.first()->is_reg()) {
1535 // src.lo (msw) is a reg, src.hi is stk/reg
1536 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1537 split.set_pair(dst.first(), src.first());
1538 } else {
1539 // msw is stack move to L5
1540 // lsw is stack move to dst.lo (real reg)
1541 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1542 split.set_pair(dst.first(), L5->as_VMReg());
1543 }
1544 
1545 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1546 // msw -> src.lo/L5, lsw -> dst.lo
1547 split_long_move(masm, src, split);
1548 
1549 // dst now has the low-order word correct; shift the MSW half into
1550 // place and OR the two together.
1551 __ sllx(split.first()->as_Register(), 32, L5);
1552 
1553 const Register d = dst.first()->as_Register();
1554 __ or3(L5, d, d);
1555 }
1556 } else {
1557 // For LP64 we can probably do better.
1558 split_long_move(masm, src, dst);
1559 }
1560 }
1561 
1562 // A double move
1563 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1564 
1565 // The painful thing here is that like long_move a VMRegPair might be
1566 // 1: a single physical register
1567 // 2: two physical registers (v8)
1568 // 3: a physical reg [lo] and a stack slot [hi] (v8)
1569 // 4: two stack slots
1570 
1571 // Since src is always a java calling convention we know that the src pair
1572 // is always either all registers or all stack (and aligned?)
1573 
1574 // The dst, however, may be split across a register [lo] and a stack slot [hi].
1575 if (src.first()->is_stack()) {
1576 if (dst.first()->is_stack()) {
1577 // stack to stack, the easiest of the bunch
1578 // there ought to be a way to use ldd/std here when alignment is ok
1579 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1580 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1581 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1582 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1583 } else {
1584 // stack to reg
1585 if (dst.second()->is_stack()) {
1586 // stack -> reg, stack -> stack
1587 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1588 if (dst.first()->is_Register()) {
1589 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1590 } else {
1591 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1592 }
1593 // This was missing. (very rare case)
1594 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1595 } else {
1596 // stack -> reg
1597 // Eventually optimize for alignment QQQ
1598 if (dst.first()->is_Register()) {
1599 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1600 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1601 } else {
1602 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1603 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1604 }
1605 }
1606 }
1607 } else if (dst.first()->is_stack()) {
1608 // reg to stack
1609 if (src.first()->is_Register()) {
1610 // Eventually optimize for alignment QQQ
1611 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1612 if (src.second()->is_stack()) {
1613 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1614 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1615 } else {
1616 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1617 }
1618 } else {
1619 // fpr to stack
1620 if (src.second()->is_stack()) {
1621 ShouldNotReachHere();
1622 } else {
1623 // Is the stack aligned?
1624 if (reg2offset(dst.first()) & 0x7) {
1625 // Not aligned; store as a pair of singles
1626 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1627 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1628 } else {
1629 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1630 }
1631 }
1632 }
1633 } else {
1634 // reg to reg
1635 if (src.first()->is_Register()) {
1636 if (dst.first()->is_Register()) {
1637 // gpr -> gpr
1638 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1639 __ mov(src.second()->as_Register(), dst.second()->as_Register());
1640 } else {
1641 // gpr -> fpr
1642 // ought to be able to do a single store
1643 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1644 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1645 // ought to be able to do a single load
1646 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1647 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1648 }
1649 } else if (dst.first()->is_Register()) {
1650 // fpr -> gpr
1651 // ought to be able to do a single store
1652 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1653 // ought to be able to do a single load
1654 // REMEMBER first() is low address not LSB
1655 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1656 if (dst.second()->is_Register()) {
1657 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1658 } else {
1659 __ ld(FP, -4 + STACK_BIAS, L4);
1660 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1661 }
1662 } else {
1663 // fpr -> fpr
1664 // In theory these overlap but the ordering is such that this is likely a nop
1665 if ( src.first() != dst.first()) {
1666 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1667 }
1668 }
1669 }
1670 }
1671 
1672 // Creates an inner frame if one hasn't already been created, and
1673 // saves a copy of the thread in L7_thread_cache
1674 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1675 if (!*already_created) {
1676 __ save_frame(0);
1677 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1678 // Don't use save_thread because it smashes G2 and we merely want to save a
1679 // copy
1680 __ mov(G2_thread, L7_thread_cache);
1681 *already_created = true;
1682 }
1683 }
1684 
1685 
1686 static void save_or_restore_arguments(MacroAssembler* masm,
1687 const int stack_slots,
1688 const int total_in_args,
1689 const int arg_save_area,
1690 OopMap* map,
1691 VMRegPair* in_regs,
1692 BasicType* in_sig_bt) {
1693 // if map is non-NULL then the code should store the values,
1694 // otherwise it should load them.
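// Usage sketch (illustrative, not part of the original source): the call
// sites below pair up as
//   save_or_restore_arguments(masm, ..., map,  in_regs, in_sig_bt);  // save before a VM call
//   save_or_restore_arguments(masm, ..., NULL, in_regs, in_sig_bt);  // restore afterwards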
1695 if (map != NULL) { 1696 // Fill in the map 1697 for (int i = 0; i < total_in_args; i++) { 1698 if (in_sig_bt[i] == T_ARRAY) { 1699 if (in_regs[i].first()->is_stack()) { 1700 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1701 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots)); 1702 } else if (in_regs[i].first()->is_Register()) { 1703 map->set_oop(in_regs[i].first()); 1704 } else { 1705 ShouldNotReachHere(); 1706 } 1707 } 1708 } 1709 } 1710 1711 // Save or restore double word values 1712 int handle_index = 0; 1713 for (int i = 0; i < total_in_args; i++) { 1714 int slot = handle_index + arg_save_area; 1715 int offset = slot * VMRegImpl::stack_slot_size; 1716 if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) { 1717 const Register reg = in_regs[i].first()->as_Register(); 1718 if (reg->is_global()) { 1719 handle_index += 2; 1720 assert(handle_index <= stack_slots, "overflow"); 1721 if (map != NULL) { 1722 __ stx(reg, SP, offset + STACK_BIAS); 1723 } else { 1724 __ ldx(SP, offset + STACK_BIAS, reg); 1725 } 1726 } 1727 } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) { 1728 handle_index += 2; 1729 assert(handle_index <= stack_slots, "overflow"); 1730 if (map != NULL) { 1731 __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS); 1732 } else { 1733 __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister()); 1734 } 1735 } 1736 } 1737 // Save floats 1738 for (int i = 0; i < total_in_args; i++) { 1739 int slot = handle_index + arg_save_area; 1740 int offset = slot * VMRegImpl::stack_slot_size; 1741 if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) { 1742 handle_index++; 1743 assert(handle_index <= stack_slots, "overflow"); 1744 if (map != NULL) { 1745 __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS); 1746 } else { 1747 __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister()); 1748 } 1749 } 1750 } 1751 1752 } 1753 1754 1755 // Check GCLocker::needs_gc and enter the runtime if it's true. This 1756 // keeps a new JNI critical region from starting until a GC has been 1757 // forced. Save down any oops in registers and describe them in an 1758 // OopMap. 
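// In outline, the code generated by this routine amounts to (sketch):
//   if (GCLocker::needs_gc()) {
//     save argument registers (save_or_restore_arguments with a map);
//     SharedRuntime::block_for_jni_critical(thread);
//     restore argument registers (save_or_restore_arguments with NULL);
//   }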
1759 static void check_needs_gc_for_critical_native(MacroAssembler* masm, 1760 const int stack_slots, 1761 const int total_in_args, 1762 const int arg_save_area, 1763 OopMapSet* oop_maps, 1764 VMRegPair* in_regs, 1765 BasicType* in_sig_bt) { 1766 __ block_comment("check GCLocker::needs_gc"); 1767 Label cont; 1768 AddressLiteral sync_state(GCLocker::needs_gc_address()); 1769 __ load_bool_contents(sync_state, G3_scratch); 1770 __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont); 1771 __ delayed()->nop(); 1772 1773 // Save down any values that are live in registers and call into the 1774 // runtime to halt for a GC 1775 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1776 save_or_restore_arguments(masm, stack_slots, total_in_args, 1777 arg_save_area, map, in_regs, in_sig_bt); 1778 1779 __ mov(G2_thread, L7_thread_cache); 1780 1781 __ set_last_Java_frame(SP, noreg); 1782 1783 __ block_comment("block_for_jni_critical"); 1784 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type); 1785 __ delayed()->mov(L7_thread_cache, O0); 1786 oop_maps->add_gc_map( __ offset(), map); 1787 1788 __ restore_thread(L7_thread_cache); // restore G2_thread 1789 __ reset_last_Java_frame(); 1790 1791 // Reload all the register arguments 1792 save_or_restore_arguments(masm, stack_slots, total_in_args, 1793 arg_save_area, NULL, in_regs, in_sig_bt); 1794 1795 __ bind(cont); 1796 #ifdef ASSERT 1797 if (StressCriticalJNINatives) { 1798 // Stress register saving 1799 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1800 save_or_restore_arguments(masm, stack_slots, total_in_args, 1801 arg_save_area, map, in_regs, in_sig_bt); 1802 // Destroy argument registers 1803 for (int i = 0; i < total_in_args; i++) { 1804 if (in_regs[i].first()->is_Register()) { 1805 const Register reg = in_regs[i].first()->as_Register(); 1806 if (reg->is_global()) { 1807 __ mov(G0, reg); 1808 } 1809 } else if (in_regs[i].first()->is_FloatRegister()) { 1810 __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister()); 1811 } 1812 } 1813 1814 save_or_restore_arguments(masm, stack_slots, total_in_args, 1815 arg_save_area, NULL, in_regs, in_sig_bt); 1816 } 1817 #endif 1818 } 1819 1820 // Unpack an array argument into a pointer to the body and the length 1821 // if the array is non-null, otherwise pass 0 for both. 
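// Conceptually the generated code computes (sketch):
//   body   = (array == NULL) ? NULL : array + arrayOopDesc::base_offset_in_bytes(elem_type);
//   length = (array == NULL) ? 0    : array->length();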
1822 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { 1823 // Pass the length, ptr pair 1824 Label is_null, done; 1825 if (reg.first()->is_stack()) { 1826 VMRegPair tmp = reg64_to_VMRegPair(L2); 1827 // Load the arg up from the stack 1828 move_ptr(masm, reg, tmp); 1829 reg = tmp; 1830 } 1831 __ cmp(reg.first()->as_Register(), G0); 1832 __ brx(Assembler::equal, false, Assembler::pt, is_null); 1833 __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4); 1834 move_ptr(masm, reg64_to_VMRegPair(L4), body_arg); 1835 __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4); 1836 move32_64(masm, reg64_to_VMRegPair(L4), length_arg); 1837 __ ba_short(done); 1838 __ bind(is_null); 1839 // Pass zeros 1840 move_ptr(masm, reg64_to_VMRegPair(G0), body_arg); 1841 move32_64(masm, reg64_to_VMRegPair(G0), length_arg); 1842 __ bind(done); 1843 } 1844 1845 static void verify_oop_args(MacroAssembler* masm, 1846 methodHandle method, 1847 const BasicType* sig_bt, 1848 const VMRegPair* regs) { 1849 Register temp_reg = G5_method; // not part of any compiled calling seq 1850 if (VerifyOops) { 1851 for (int i = 0; i < method->size_of_parameters(); i++) { 1852 if (sig_bt[i] == T_OBJECT || 1853 sig_bt[i] == T_ARRAY) { 1854 VMReg r = regs[i].first(); 1855 assert(r->is_valid(), "bad oop arg"); 1856 if (r->is_stack()) { 1857 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; 1858 ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg); 1859 __ ld_ptr(SP, ld_off, temp_reg); 1860 __ verify_oop(temp_reg); 1861 } else { 1862 __ verify_oop(r->as_Register()); 1863 } 1864 } 1865 } 1866 } 1867 } 1868 1869 static void gen_special_dispatch(MacroAssembler* masm, 1870 methodHandle method, 1871 const BasicType* sig_bt, 1872 const VMRegPair* regs) { 1873 verify_oop_args(masm, method, sig_bt, regs); 1874 vmIntrinsics::ID iid = method->intrinsic_id(); 1875 1876 // Now write the args into the outgoing interpreter space 1877 bool has_receiver = false; 1878 Register receiver_reg = noreg; 1879 int member_arg_pos = -1; 1880 Register member_reg = noreg; 1881 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1882 if (ref_kind != 0) { 1883 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1884 member_reg = G5_method; // known to be free at this point 1885 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1886 } else if (iid == vmIntrinsics::_invokeBasic) { 1887 has_receiver = true; 1888 } else { 1889 fatal("unexpected intrinsic id %d", iid); 1890 } 1891 1892 if (member_reg != noreg) { 1893 // Load the member_arg into register, if necessary. 1894 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); 1895 VMReg r = regs[member_arg_pos].first(); 1896 if (r->is_stack()) { 1897 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; 1898 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); 1899 __ ld_ptr(SP, ld_off, member_reg); 1900 } else { 1901 // no data motion is needed 1902 member_reg = r->as_Register(); 1903 } 1904 } 1905 1906 if (has_receiver) { 1907 // Make sure the receiver is loaded into a register. 
1908 assert(method->size_of_parameters() > 0, "oob");
1909 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1910 VMReg r = regs[0].first();
1911 assert(r->is_valid(), "bad receiver arg");
1912 if (r->is_stack()) {
1913 // Porting note: This assumes that compiled calling conventions always
1914 // pass the receiver oop in a register. If this is not true on some
1915 // platform, pick a temp and load the receiver from stack.
1916 fatal("receiver always in a register");
1917 receiver_reg = G3_scratch; // known to be free at this point
1918 RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1919 ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1920 __ ld_ptr(SP, ld_off, receiver_reg);
1921 } else {
1922 // no data motion is needed
1923 receiver_reg = r->as_Register();
1924 }
1925 }
1926 
1927 // Figure out which address we are really jumping to:
1928 MethodHandles::generate_method_handle_dispatch(masm, iid,
1929 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1930 }
1931 
1932 // ---------------------------------------------------------------------------
1933 // Generate a native wrapper for a given method. The method takes arguments
1934 // in the Java compiled code convention, marshals them to the native
1935 // convention (handlizes oops, etc), transitions to native, makes the call,
1936 // returns to java state (possibly blocking), unhandlizes any result and
1937 // returns.
1938 //
1939 // Critical native functions are a shorthand for the use of
1940 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1941 // functions. The wrapper is expected to unpack the arguments before
1942 // passing them to the callee and perform checks before and after the
1943 // native call to ensure that the GCLocker
1944 // lock_critical/unlock_critical semantics are followed. Some other
1945 // parts of JNI setup are skipped, like the tear down of the JNI handle
1946 // block and the check for pending exceptions, since it's impossible for
1947 // them to be thrown.
1948 //
1949 // They are roughly structured like this:
1950 // if (GCLocker::needs_gc())
1951 // SharedRuntime::block_for_jni_critical();
1952 // transition to thread_in_native
1953 // unpack array arguments and call native entry point
1954 // check for safepoint in progress
1955 // check if any thread suspend flags are set
1956 // call into JVM and possibly unlock the JNI critical
1957 // if a GC was suppressed while in the critical native.
1958 // transition back to thread_in_Java
1959 // return to caller
1960 //
1961 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1962 const methodHandle& method,
1963 int compile_id,
1964 BasicType* in_sig_bt,
1965 VMRegPair* in_regs,
1966 BasicType ret_type) {
1967 if (method->is_method_handle_intrinsic()) {
1968 vmIntrinsics::ID iid = method->intrinsic_id();
1969 intptr_t start = (intptr_t)__ pc();
1970 int vep_offset = ((intptr_t)__ pc()) - start;
1971 gen_special_dispatch(masm,
1972 method,
1973 in_sig_bt,
1974 in_regs);
1975 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1976 __ flush();
1977 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1978 return nmethod::new_native_nmethod(method,
1979 compile_id,
1980 masm->code(),
1981 vep_offset,
1982 frame_complete,
1983 stack_slots / VMRegImpl::slots_per_word,
1984 in_ByteSize(-1),
1985 in_ByteSize(-1),
1986 (OopMapSet*)NULL);
1987 }
1988 bool is_critical_native = true;
1989 address native_func = method->critical_native_function();
1990 if (native_func == NULL) {
1991 native_func = method->native_function();
1992 is_critical_native = false;
1993 }
1994 assert(native_func != NULL, "must have function");
1995 
1996 // Native nmethod wrappers never take possession of the oop arguments.
1997 // So the caller will gc the arguments. The only thing we need an
1998 // oopMap for is if the call is static.
1999 //
2000 // An OopMap for lock (and class if static), and one for the VM call itself
2001 OopMapSet *oop_maps = new OopMapSet();
2002 intptr_t start = (intptr_t)__ pc();
2003 
2004 // First thing: make an IC check to see if we should even be here
2005 {
2006 Label L;
2007 const Register temp_reg = G3_scratch;
2008 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2009 __ verify_oop(O0);
2010 __ load_klass(O0, temp_reg);
2011 __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2012 
2013 __ jump_to(ic_miss, temp_reg);
2014 __ delayed()->nop();
2015 __ align(CodeEntryAlignment);
2016 __ bind(L);
2017 }
2018 
2019 int vep_offset = ((intptr_t)__ pc()) - start;
2020 
2021 #ifdef COMPILER1
2022 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2023 // Object.hashCode, System.identityHashCode can pull the hashCode from the
2024 // header word instead of doing a full VM transition once it's been computed.
2025 // Since hashCode is usually polymorphic at call sites we can't do this
2026 // optimization at the call site without a lot of work.
2027 Label slowCase;
2028 Label done;
2029 Register obj_reg = O0;
2030 Register result = O0;
2031 Register header = G3_scratch;
2032 Register hash = G3_scratch; // overwrite header value with hash value
2033 Register mask = G1; // to get hash field from header
2034 
2035 // Unlike Object.hashCode, System.identityHashCode is a static method and
2036 // gets the object as an argument instead of the receiver.
2037 if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
2038 assert(method->is_static(), "method should be static");
2039 // return 0 for null reference input
2040 __ br_null(obj_reg, false, Assembler::pn, done);
2041 __ delayed()->mov(obj_reg, hash);
2042 }
2043 
2044 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
2045 // We depend on hash_mask being at most 32 bits and avoid the use of
2046 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2047 // vm: see markOop.hpp.
2048 __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
2049 __ sethi(markOopDesc::hash_mask, mask);
2050 __ btst(markOopDesc::unlocked_value, header);
2051 __ br(Assembler::zero, false, Assembler::pn, slowCase);
2052 if (UseBiasedLocking) {
2053 // Check if biased and fall through to runtime if so
2054 __ delayed()->nop();
2055 __ btst(markOopDesc::biased_lock_bit_in_place, header);
2056 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2057 }
2058 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2059 
2060 // Check for a valid (non-zero) hash code and get its value.
2061 #ifdef _LP64
2062 __ srlx(header, markOopDesc::hash_shift, hash);
2063 #else
2064 __ srl(header, markOopDesc::hash_shift, hash);
2065 #endif
2066 __ andcc(hash, mask, hash);
2067 __ br(Assembler::equal, false, Assembler::pn, slowCase);
2068 __ delayed()->nop();
2069 
2070 // leaf return.
2071 __ bind(done);
2072 __ retl();
2073 __ delayed()->mov(hash, result);
2074 __ bind(slowCase);
2075 }
2076 #endif // COMPILER1
2077 
2078 
2079 // We have received a description of where all the java args are located
2080 // on entry to the wrapper. We need to convert these args to where
2081 // the jni function will expect them. To figure out where they go
2082 // we convert the java signature to a C signature by inserting
2083 // the hidden arguments as arg[0] and possibly arg[1] (static method)
2084 
2085 const int total_in_args = method->size_of_parameters();
2086 int total_c_args = total_in_args;
2087 int total_save_slots = 6 * VMRegImpl::slots_per_word;
2088 if (!is_critical_native) {
2089 total_c_args += 1;
2090 if (method->is_static()) {
2091 total_c_args++;
2092 }
2093 } else {
2094 for (int i = 0; i < total_in_args; i++) {
2095 if (in_sig_bt[i] == T_ARRAY) {
2096 // These have to be saved and restored across the safepoint
2097 total_c_args++;
2098 }
2099 }
2100 }
2101 
2102 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2103 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2104 BasicType* in_elem_bt = NULL;
2105 
2106 int argc = 0;
2107 if (!is_critical_native) {
2108 out_sig_bt[argc++] = T_ADDRESS;
2109 if (method->is_static()) {
2110 out_sig_bt[argc++] = T_OBJECT;
2111 }
2112 
2113 for (int i = 0; i < total_in_args ; i++ ) {
2114 out_sig_bt[argc++] = in_sig_bt[i];
2115 }
2116 } else {
2117 Thread* THREAD = Thread::current();
2118 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2119 SignatureStream ss(method->signature());
2120 for (int i = 0; i < total_in_args ; i++ ) {
2121 if (in_sig_bt[i] == T_ARRAY) {
2122 // Arrays are passed as int, elem* pair
2123 out_sig_bt[argc++] = T_INT;
2124 out_sig_bt[argc++] = T_ADDRESS;
2125 Symbol* atype = ss.as_symbol(CHECK_NULL);
2126 const char* at = atype->as_C_string();
2127 if (strlen(at) == 2) {
2128 assert(at[0] == '[', "must be");
2129 switch (at[1]) {
2130 case 'B': in_elem_bt[i] = T_BYTE; break;
2131 case 'C': in_elem_bt[i] = T_CHAR; break;
2132 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2133 case 'F': in_elem_bt[i] = T_FLOAT; break;
2134 case 'I': in_elem_bt[i] = T_INT; break;
2135 case 'J': in_elem_bt[i] = T_LONG; break;
2136 case 'S': in_elem_bt[i] = T_SHORT; break;
2137 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2138 default: ShouldNotReachHere();
2139 }
2140 }
2141 } else {
2142 out_sig_bt[argc++] = in_sig_bt[i];
2143 in_elem_bt[i] = T_VOID;
2144 }
2145 if (in_sig_bt[i] != T_VOID) {
2146 assert(in_sig_bt[i] == ss.type(), "must match");
2147 ss.next();
2148 }
2149 }
2150 }
2151 
2152 // Now figure out where the args must be stored and how much stack space
2153 // they require (neglecting out_preserve_stack_slots but including space
2154 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
2155 //
2156 int out_arg_slots;
2157 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2158 
2159 if (is_critical_native) {
2160 // Critical natives may have to call out so they need a save area
2161 // for register arguments.
2162 int double_slots = 0;
2163 int single_slots = 0;
2164 for ( int i = 0; i < total_in_args; i++) {
2165 if (in_regs[i].first()->is_Register()) {
2166 const Register reg = in_regs[i].first()->as_Register();
2167 switch (in_sig_bt[i]) {
2168 case T_ARRAY:
2169 case T_BOOLEAN:
2170 case T_BYTE:
2171 case T_SHORT:
2172 case T_CHAR:
2173 case T_INT: assert(reg->is_in(), "don't need to save these"); break;
2174 case T_LONG: if (reg->is_global()) double_slots++; break;
2175 default: ShouldNotReachHere();
2176 }
2177 } else if (in_regs[i].first()->is_FloatRegister()) {
2178 switch (in_sig_bt[i]) {
2179 case T_FLOAT: single_slots++; break;
2180 case T_DOUBLE: double_slots++; break;
2181 default: ShouldNotReachHere();
2182 }
2183 }
2184 }
2185 total_save_slots = double_slots * 2 + single_slots;
2186 }
2187 
2188 // Compute framesize for the wrapper. We need to handlize all oops in
2189 // registers. We must create space for them here that is disjoint from
2190 // the windowed save area because we have no control over when we might
2191 // flush the window again and overwrite values that gc has since modified.
2192 // (The live window race)
2193 //
2194 // We always just allocate 6 words for storing down these objects. This allows
2195 // us to simply record the base and use the Ireg number to decide which
2196 // slot to use. (Note that the reg number is the inbound number not the
2197 // outbound number).
2198 // We must shuffle args to match the native convention, and include var-args space.
2199 
2200 // Calculate the total number of stack slots we will need.
2201 2202 // First count the abi requirement plus all of the outgoing args 2203 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 2204 2205 // Now the space for the inbound oop handle area 2206 2207 int oop_handle_offset = round_to(stack_slots, 2); 2208 stack_slots += total_save_slots; 2209 2210 // Now any space we need for handlizing a klass if static method 2211 2212 int klass_slot_offset = 0; 2213 int klass_offset = -1; 2214 int lock_slot_offset = 0; 2215 bool is_static = false; 2216 2217 if (method->is_static()) { 2218 klass_slot_offset = stack_slots; 2219 stack_slots += VMRegImpl::slots_per_word; 2220 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 2221 is_static = true; 2222 } 2223 2224 // Plus a lock if needed 2225 2226 if (method->is_synchronized()) { 2227 lock_slot_offset = stack_slots; 2228 stack_slots += VMRegImpl::slots_per_word; 2229 } 2230 2231 // Now a place to save return value or as a temporary for any gpr -> fpr moves 2232 stack_slots += 2; 2233 2234 // Ok The space we have allocated will look like: 2235 // 2236 // 2237 // FP-> | | 2238 // |---------------------| 2239 // | 2 slots for moves | 2240 // |---------------------| 2241 // | lock box (if sync) | 2242 // |---------------------| <- lock_slot_offset 2243 // | klass (if static) | 2244 // |---------------------| <- klass_slot_offset 2245 // | oopHandle area | 2246 // |---------------------| <- oop_handle_offset 2247 // | outbound memory | 2248 // | based arguments | 2249 // | | 2250 // |---------------------| 2251 // | vararg area | 2252 // |---------------------| 2253 // | | 2254 // SP-> | out_preserved_slots | 2255 // 2256 // 2257 2258 2259 // Now compute actual number of stack words we need rounding to make 2260 // stack properly aligned. 2261 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); 2262 2263 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 2264 2265 // Generate stack overflow check before creating frame 2266 __ generate_stack_overflow_check(stack_size); 2267 2268 // Generate a new frame for the wrapper. 2269 __ save(SP, -stack_size, SP); 2270 2271 int frame_complete = ((intptr_t)__ pc()) - start; 2272 2273 __ verify_thread(); 2274 2275 if (is_critical_native) { 2276 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, 2277 oop_handle_offset, oop_maps, in_regs, in_sig_bt); 2278 } 2279 2280 // 2281 // We immediately shuffle the arguments so that any vm call we have to 2282 // make from here on out (sync slow path, jvmti, etc.) we will have 2283 // captured the oops from our caller and have a valid oopMap for 2284 // them. 2285 2286 // ----------------- 2287 // The Grand Shuffle 2288 // 2289 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2290 // (derived from JavaThread* which is in L7_thread_cache) and, if static, 2291 // the class mirror instead of a receiver. This pretty much guarantees that 2292 // register layout will not match. We ignore these extra arguments during 2293 // the shuffle. The shuffle is described by the two calling convention 2294 // vectors we have in our possession. We simply walk the java vector to 2295 // get the source locations and the c vector to get the destinations. 2296 // Because we have a new window and the argument registers are completely 2297 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about 2298 // here. 2299 2300 // This is a trick. We double the stack slots so we can claim 2301 // the oops in the caller's frame. 
Since we are sure to have 2302 // more args than the caller doubling is enough to make 2303 // sure we can capture all the incoming oop args from the 2304 // caller. 2305 // 2306 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2307 // Record sp-based slot for receiver on stack for non-static methods 2308 int receiver_offset = -1; 2309 2310 // We move the arguments backward because the floating point registers 2311 // destination will always be to a register with a greater or equal register 2312 // number or the stack. 2313 2314 #ifdef ASSERT 2315 bool reg_destroyed[RegisterImpl::number_of_registers]; 2316 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2317 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2318 reg_destroyed[r] = false; 2319 } 2320 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2321 freg_destroyed[f] = false; 2322 } 2323 2324 #endif /* ASSERT */ 2325 2326 for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) { 2327 2328 #ifdef ASSERT 2329 if (in_regs[i].first()->is_Register()) { 2330 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!"); 2331 } else if (in_regs[i].first()->is_FloatRegister()) { 2332 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!"); 2333 } 2334 if (out_regs[c_arg].first()->is_Register()) { 2335 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2336 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 2337 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true; 2338 } 2339 #endif /* ASSERT */ 2340 2341 switch (in_sig_bt[i]) { 2342 case T_ARRAY: 2343 if (is_critical_native) { 2344 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]); 2345 c_arg--; 2346 break; 2347 } 2348 case T_OBJECT: 2349 assert(!is_critical_native, "no oop arguments"); 2350 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2351 ((i == 0) && (!is_static)), 2352 &receiver_offset); 2353 break; 2354 case T_VOID: 2355 break; 2356 2357 case T_FLOAT: 2358 float_move(masm, in_regs[i], out_regs[c_arg]); 2359 break; 2360 2361 case T_DOUBLE: 2362 assert( i + 1 < total_in_args && 2363 in_sig_bt[i + 1] == T_VOID && 2364 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2365 double_move(masm, in_regs[i], out_regs[c_arg]); 2366 break; 2367 2368 case T_LONG : 2369 long_move(masm, in_regs[i], out_regs[c_arg]); 2370 break; 2371 2372 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2373 2374 default: 2375 move32_64(masm, in_regs[i], out_regs[c_arg]); 2376 } 2377 } 2378 2379 // Pre-load a static method's oop into O1. Used both by locking code and 2380 // the normal JNI call code. 2381 if (method->is_static() && !is_critical_native) { 2382 __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1); 2383 2384 // Now handlize the static class mirror in O1. It's known not-null. 2385 __ st_ptr(O1, SP, klass_offset + STACK_BIAS); 2386 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2387 __ add(SP, klass_offset + STACK_BIAS, O1); 2388 } 2389 2390 2391 const Register L6_handle = L6; 2392 2393 if (method->is_synchronized()) { 2394 assert(!is_critical_native, "unhandled"); 2395 __ mov(O1, L6_handle); 2396 } 2397 2398 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs 2399 // except O6/O7. 
So if we must call out, we immediately
2400 // push a new frame and flush the windows.
2401 #ifdef _LP64
2402 intptr_t thepc = (intptr_t) __ pc();
2403 {
2404 address here = __ pc();
2405 // Call the next instruction
2406 __ call(here + 8, relocInfo::none);
2407 __ delayed()->nop();
2408 }
2409 #else
2410 intptr_t thepc = __ load_pc_address(O7, 0);
2411 #endif /* _LP64 */
2412 
2413 // We use the same pc/oopMap repeatedly when we call out
2414 oop_maps->add_gc_map(thepc - start, map);
2415 
2416 // O7 now has the pc loaded that we will use when we finally call to native.
2417 
2418 // Save thread in L7; it crosses a bunch of VM calls below
2419 // Don't use save_thread because it smashes G2 and we merely
2420 // want to save a copy
2421 __ mov(G2_thread, L7_thread_cache);
2422 
2423 
2424 // If we create an inner frame, once is plenty;
2425 // when we create it we must also save G2_thread
2426 bool inner_frame_created = false;
2427 
2428 // dtrace method entry support
2429 {
2430 SkipIfEqual skip_if(
2431 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2432 // create inner frame
2433 __ save_frame(0);
2434 __ mov(G2_thread, L7_thread_cache);
2435 __ set_metadata_constant(method(), O1);
2436 __ call_VM_leaf(L7_thread_cache,
2437 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2438 G2_thread, O1);
2439 __ restore();
2440 }
2441 
2442 // RedefineClasses() tracing support for obsolete method entry
2443 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2444 // create inner frame
2445 __ save_frame(0);
2446 __ mov(G2_thread, L7_thread_cache);
2447 __ set_metadata_constant(method(), O1);
2448 __ call_VM_leaf(L7_thread_cache,
2449 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2450 G2_thread, O1);
2451 __ restore();
2452 }
2453 
2454 // We are in the jni frame unless inner_frame_created is true, in which
2455 // case we are one frame deeper (the "inner" frame). In the "inner"
2456 // frame the args are in the Iregs; in the jni frame
2457 // they are in the Oregs.
2458 // If we ever need to go to the VM (for locking, jvmti) then
2459 // we will always be in the "inner" frame.
2460 
2461 // Lock a synchronized method
2462 int lock_offset = -1; // Set if locked
2463 if (method->is_synchronized()) {
2464 Register Roop = O1;
2465 const Register L3_box = L3;
2466 
2467 create_inner_frame(masm, &inner_frame_created);
2468 
2469 __ ld_ptr(I1, 0, O1);
2470 Label done;
2471 
2472 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2473 __ add(FP, lock_offset+STACK_BIAS, L3_box);
2474 #ifdef ASSERT
2475 if (UseBiasedLocking) {
2476 // making the box point to itself will make it clear it went unused
2477 // but also be obviously invalid
2478 __ st_ptr(L3_box, L3_box, 0);
2479 }
2480 #endif // ASSERT
2481 //
2482 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2483 //
2484 __ compiler_lock_object(Roop, L1, L3_box, L2);
2485 __ br(Assembler::equal, false, Assembler::pt, done);
2486 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2487 
2488 
2489 // None of the above fast optimizations worked so we have to get into the
2490 // slow case of monitor enter. Inline a special case of call_VM that
2491 // disallows any pending_exception.
2492 __ mov(Roop, O0); // Need oop in O0
2493 __ mov(L3_box, O1);
2494 
2495 // Record last_Java_sp, in case the VM code releases the JVM lock.
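// The slow path below amounts to calling (sketch of the hand-rolled call):
//   SharedRuntime::complete_monitor_locking_C(oop, lock_box, thread)  // args in O0, O1, O2
// without going through call_VM, so the frame anchor stays under our control.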
2496 
2497 __ set_last_Java_frame(FP, I7);
2498 
2499 // do the call
2500 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2501 __ delayed()->mov(L7_thread_cache, O2);
2502 
2503 __ restore_thread(L7_thread_cache); // restore G2_thread
2504 __ reset_last_Java_frame();
2505 
2506 #ifdef ASSERT
2507 { Label L;
2508 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2509 __ br_null_short(O0, Assembler::pt, L);
2510 __ stop("no pending exception allowed on exit from IR::monitorenter");
2511 __ bind(L);
2512 }
2513 #endif
2514 __ bind(done);
2515 }
2516 
2517 
2518 // Finally just about ready to make the JNI call
2519 
2520 __ flushw();
2521 if (inner_frame_created) {
2522 __ restore();
2523 } else {
2524 // Store only what we need from this frame
2525 // QQQ I think that non-v9 (like we care) we don't need these saves
2526 // either as the flush traps and the current window goes too.
2527 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2528 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2529 }
2530 
2531 // get JNIEnv* which is first argument to native
2532 if (!is_critical_native) {
2533 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2534 }
2535 
2536 // Use that pc we placed in O7 a while back as the current frame anchor
2537 __ set_last_Java_frame(SP, O7);
2538 
2539 // We flushed the windows ages ago; now mark them as flushed before transitioning.
2540 __ set(JavaFrameAnchor::flushed, G3_scratch);
2541 __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2542 
2543 // Transition from _thread_in_Java to _thread_in_native.
2544 __ set(_thread_in_native, G3_scratch);
2545 
2546 #ifdef _LP64
2547 AddressLiteral dest(native_func);
2548 __ relocate(relocInfo::runtime_call_type);
2549 __ jumpl_to(dest, O7, O7);
2550 #else
2551 __ call(native_func, relocInfo::runtime_call_type);
2552 #endif
2553 __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2554 
2555 __ restore_thread(L7_thread_cache); // restore G2_thread
2556 
2557 // Unpack native results. For int-types, we do any needed sign-extension
2558 // and move things into I0. The return value there will survive any VM
2559 // calls for blocking or unlocking. An FP or OOP result (handle) is done
2560 // specially in the slow-path code.
2561 switch (ret_type) {
2562 case T_VOID: break; // Nothing to do!
2563 case T_FLOAT: break; // Got it where we want it (unless slow-path)
2564 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
2565 // In a 64-bit build the result is in O0; in a 32-bit build it is in O0, O1
2566 case T_LONG:
2567 #ifndef _LP64
2568 __ mov(O1, I1);
2569 #endif
2570 // Fall thru
2571 case T_OBJECT: // Really a handle. Cannot de-handlize until after reclaiming jvm_lock
2572 case T_ARRAY:
2573 case T_INT:
2574 __ mov(O0, I0);
2575 break;
2576 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2577 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2578 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2579 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2580 
2581 default:
2582 ShouldNotReachHere();
2583 }
2584 
2585 Label after_transition;
2586 // must we block?
2587 
2588 // Block, if necessary, before resuming in _thread_in_Java state.
2589 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2590 { Label no_block;
2591 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2592 
2593 // Switch thread to "native transition" state before reading the synchronization state.
2594 // This additional state is necessary because reading and testing the synchronization
2595 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2596 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2597 // VM thread changes sync state to synchronizing and suspends threads for GC.
2598 // Thread A is resumed to finish this native method, but doesn't block here since it
2599 // didn't see any synchronization in progress, and escapes.
2600 __ set(_thread_in_native_trans, G3_scratch);
2601 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2602 if (os::is_MP()) {
2603 if (UseMembar) {
2604 // Force this write out before the read below
2605 __ membar(Assembler::StoreLoad);
2606 } else {
2607 // Write serialization page so VM thread can do a pseudo remote membar.
2608 // We use the current thread pointer to calculate a thread specific
2609 // offset to write to within the page. This minimizes bus traffic
2610 // due to cache line collision.
2611 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2612 }
2613 }
2614 __ load_contents(sync_state, G3_scratch);
2615 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2616 
2617 Label L;
2618 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2619 __ br(Assembler::notEqual, false, Assembler::pn, L);
2620 __ delayed()->ld(suspend_state, G3_scratch);
2621 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2622 __ bind(L);
2623 
2624 // Block. Save any potential method result value before the operation and
2625 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2626 // lets us share the oopMap we used when we went native rather than create
2627 // a distinct one for this pc
2628 //
2629 save_native_result(masm, ret_type, stack_slots);
2630 if (!is_critical_native) {
2631 __ call_VM_leaf(L7_thread_cache,
2632 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2633 G2_thread);
2634 } else {
2635 __ call_VM_leaf(L7_thread_cache,
2636 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2637 G2_thread);
2638 }
2639 
2640 // Restore any method result value
2641 restore_native_result(masm, ret_type, stack_slots);
2642 
2643 if (is_critical_native) {
2644 // The call above performed the transition to thread_in_Java so
2645 // skip the transition logic below.
2646 __ ba(after_transition);
2647 __ delayed()->nop();
2648 }
2649 
2650 __ bind(no_block);
2651 }
2652 
2653 // thread state is thread_in_native_trans. Any safepoint blocking has already
2654 // happened so we can now change state to _thread_in_Java.
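// The full state sequence around the native call is thus (sketch):
//   _thread_in_Java -> _thread_in_native          (before calling the native entry)
//   _thread_in_native -> _thread_in_native_trans  (on return, above)
//   _thread_in_native_trans -> _thread_in_Java    (here, once no safepoint is pending)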
2655 __ set(_thread_in_Java, G3_scratch);
2656 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2657 __ bind(after_transition);
2658 
2659 Label no_reguard;
2660 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2661 __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2662 
2663 save_native_result(masm, ret_type, stack_slots);
2664 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2665 __ delayed()->nop();
2666 
2667 __ restore_thread(L7_thread_cache); // restore G2_thread
2668 restore_native_result(masm, ret_type, stack_slots);
2669 
2670 __ bind(no_reguard);
2671 
2672 // Handle possible exception (will unlock if necessary)
2673 
2674 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2675 
2676 // Unlock
2677 if (method->is_synchronized()) {
2678 Label done;
2679 Register I2_ex_oop = I2;
2680 const Register L3_box = L3;
2681 // Get locked oop from the handle we passed to jni
2682 __ ld_ptr(L6_handle, 0, L4);
2683 __ add(SP, lock_offset+STACK_BIAS, L3_box);
2684 // Must save pending exception around the slow-path VM call. Since it's a
2685 // leaf call, the pending exception (if any) can be kept in a register.
2686 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2687 // Now unlock
2688 // (Roop, Rmark, Rbox, Rscratch)
2689 __ compiler_unlock_object(L4, L1, L3_box, L2);
2690 __ br(Assembler::equal, false, Assembler::pt, done);
2691 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2692 
2693 // save and restore any potential method result value around the unlocking
2694 // operation. Will save in I0 (or stack for FP returns).
2695 save_native_result(masm, ret_type, stack_slots);
2696 
2697 // Must clear pending-exception before re-entering the VM. Since this is
2698 // a leaf call, pending-exception-oop can be safely kept in a register.
2699 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2700 
2701 // slow case of monitor exit. Inline a special case of call_VM that
2702 // disallows any pending_exception.
2703 __ mov(L3_box, O1);
2704 
2705 // Pass in current thread pointer
2706 __ mov(G2_thread, O2);
2707 
2708 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2709 __ delayed()->mov(L4, O0); // Need oop in O0
2710 
2711 __ restore_thread(L7_thread_cache); // restore G2_thread
2712 
2713 #ifdef ASSERT
2714 { Label L;
2715 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2716 __ br_null_short(O0, Assembler::pt, L);
2717 __ stop("no pending exception allowed on exit from IR::monitorexit");
2718 __ bind(L);
2719 }
2720 #endif
2721 restore_native_result(masm, ret_type, stack_slots);
2722 // check_forward_pending_exception jumps to forward_exception if any pending
2723 // exception is set. The forward_exception routine expects to see the
2724 // exception in pending_exception and not in a register. Kind of clumsy,
2725 // since all folks who branch to forward_exception must have tested
2726 // pending_exception first and hence have it in a register already.
2727 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset())); 2728 __ bind(done); 2729 } 2730 2731 // Tell dtrace about this method exit 2732 { 2733 SkipIfEqual skip_if( 2734 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2735 save_native_result(masm, ret_type, stack_slots); 2736 __ set_metadata_constant(method(), O1); 2737 __ call_VM_leaf(L7_thread_cache, 2738 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2739 G2_thread, O1); 2740 restore_native_result(masm, ret_type, stack_slots); 2741 } 2742 2743 // Clear "last Java frame" SP and PC. 2744 __ verify_thread(); // G2_thread must be correct 2745 __ reset_last_Java_frame(); 2746 2747 // Unpack oop result 2748 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2749 Label L; 2750 __ addcc(G0, I0, G0); 2751 __ brx(Assembler::notZero, true, Assembler::pt, L); 2752 __ delayed()->ld_ptr(I0, 0, I0); 2753 __ mov(G0, I0); 2754 __ bind(L); 2755 __ verify_oop(I0); 2756 } 2757 2758 if (!is_critical_native) { 2759 // reset handle block 2760 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5); 2761 __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes()); 2762 2763 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch); 2764 check_forward_pending_exception(masm, G3_scratch); 2765 } 2766 2767 2768 // Return 2769 2770 #ifndef _LP64 2771 if (ret_type == T_LONG) { 2772 2773 // Must leave proper result in O0,O1 and G1 (c2/tiered only) 2774 __ sllx(I0, 32, G1); // Shift bits into high G1 2775 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 2776 __ or3 (I1, G1, G1); // OR 64 bits into G1 2777 } 2778 #endif 2779 2780 __ ret(); 2781 __ delayed()->restore(); 2782 2783 __ flush(); 2784 2785 nmethod *nm = nmethod::new_native_nmethod(method, 2786 compile_id, 2787 masm->code(), 2788 vep_offset, 2789 frame_complete, 2790 stack_slots / VMRegImpl::slots_per_word, 2791 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2792 in_ByteSize(lock_offset), 2793 oop_maps); 2794 2795 if (is_critical_native) { 2796 nm->set_lazy_critical_native(true); 2797 } 2798 return nm; 2799 2800 } 2801 2802 // this function returns the adjust size (in number of words) to a c2i adapter 2803 // activation for use during deoptimization 2804 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2805 assert(callee_locals >= callee_parameters, 2806 "test and remove; got more parms than locals"); 2807 if (callee_locals < callee_parameters) 2808 return 0; // No adjustment for negative locals 2809 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords; 2810 return round_to(diff, WordsPerLong); 2811 } 2812 2813 // "Top of Stack" slots that may be unused by the calling convention but must 2814 // otherwise be preserved. 2815 // On Intel these are not necessary and the value can be zero. 2816 // On Sparc this describes the words reserved for storing a register window 2817 // when an interrupt occurs. 
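// For example (illustrative): with frame::register_save_words == 16 (the 8 %i's
// plus 8 %l's of a register window) and VMRegImpl::slots_per_word == 2 on a
// 64-bit VM, this reserves 32 stack slots.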
2818 uint SharedRuntime::out_preserve_stack_slots() {
2819 return frame::register_save_words * VMRegImpl::slots_per_word;
2820 }
2821 
2822 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
2823 //
2824 // Common out the new frame generation for deopt and uncommon trap
2825 //
2826 Register G3pcs = G3_scratch; // Array of new pcs (input)
2827 Register Oreturn0 = O0;
2828 Register Oreturn1 = O1;
2829 Register O2UnrollBlock = O2;
2830 Register O3array = O3; // Array of frame sizes (input)
2831 Register O4array_size = O4; // number of frames (input)
2832 Register O7frame_size = O7; // holds a frame size (temp)
2833 
2834 __ ld_ptr(O3array, 0, O7frame_size);
2835 __ sub(G0, O7frame_size, O7frame_size);
2836 __ save(SP, O7frame_size, SP);
2837 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
2838 
2839 #ifdef ASSERT
2840 // make sure that the frames are aligned properly
2841 #ifndef _LP64
2842 __ btst(wordSize*2-1, SP);
2843 __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
2844 #endif
2845 #endif
2846 
2847 // Deopt needs to pass some extra live values from frame to frame
2848 
2849 if (deopt) {
2850 __ mov(Oreturn0->after_save(), Oreturn0);
2851 __ mov(Oreturn1->after_save(), Oreturn1);
2852 }
2853 
2854 __ mov(O4array_size->after_save(), O4array_size);
2855 __ sub(O4array_size, 1, O4array_size);
2856 __ mov(O3array->after_save(), O3array);
2857 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
2858 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
2859 
2860 #ifdef ASSERT
2861 // trash registers to show a clear pattern in backtraces
2862 __ set(0xDEAD0000, I0);
2863 __ add(I0, 2, I1);
2864 __ add(I0, 4, I2);
2865 __ add(I0, 6, I3);
2866 __ add(I0, 8, I4);
2867 // Don't touch I5; it could hold the valuable savedSP
2868 __ set(0xDEADBEEF, L0);
2869 __ mov(L0, L1);
2870 __ mov(L0, L2);
2871 __ mov(L0, L3);
2872 __ mov(L0, L4);
2873 __ mov(L0, L5);
2874 
2875 // trash the return value as there is nothing to return yet
2876 __ set(0xDEAD0001, O7);
2877 #endif
2878 
2879 __ mov(SP, O5_savedSP);
2880 }
2881 
2882 
2883 static void make_new_frames(MacroAssembler* masm, bool deopt) {
2884 //
2885 // loop through the UnrollBlock info and create new frames
2886 //
2887 Register G3pcs = G3_scratch;
2888 Register Oreturn0 = O0;
2889 Register Oreturn1 = O1;
2890 Register O2UnrollBlock = O2;
2891 Register O3array = O3;
2892 Register O4array_size = O4;
2893 Label loop;
2894 
2895 #ifdef ASSERT
2896 // Compilers generate code that bangs the stack by as much as the
2897 // interpreter would need. So this stack banging should never
2898 // trigger a fault. Verify that it does not on non-product builds.
2899 if (UseStackBanging) {
2900 // Get total frame size for interpreted frames
2901 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
2902 __ bang_stack_size(O4, O3, G3_scratch);
2903 }
2904 #endif
2905 
2906 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
2907 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
2908 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
2909 
2910 // Adjust old interpreter frame to make space for new frame's extra java locals
2911 //
2912 // We capture the original sp for the transition frame only because it is needed in
2913 // order to properly calculate interpreter_sp_adjustment. Even though in real life
2914 // every interpreter frame captures a savedSP it is only needed at the transition
2915 // (fortunately). If we had to have it correct everywhere then we would need to
2916 // be told the sp_adjustment for each frame we create. If the frame size array
2917 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
2918 // for each frame we create and keep up the illusion everywhere.
2919 //
2920 
2921 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
2922 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
2923 __ sub(SP, O7, SP);
2924 
2925 #ifdef ASSERT
2926 // make sure that there is at least one entry in the array
2927 __ tst(O4array_size);
2928 __ breakpoint_trap(Assembler::zero, Assembler::icc);
2929 #endif
2930 
2931 // Now push the new interpreter frames
2932 __ bind(loop);
2933 
2934 // allocate a new frame, filling the registers
2935 
2936 gen_new_frame(masm, deopt); // allocate an interpreter frame
2937 
2938 __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
2939 __ delayed()->add(O3array, wordSize, O3array);
2940 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
2941 
2942 }
2943 
2944 //------------------------------generate_deopt_blob----------------------------
2945 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
2946 // instead.
2947 void SharedRuntime::generate_deopt_blob() {
2948 // allocate space for the code
2949 ResourceMark rm;
2950 // setup code generation tools
2951 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
2952 #ifdef ASSERT
2953 if (UseStackBanging) {
2954 pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
2955 }
2956 #endif
2957 #if INCLUDE_JVMCI
2958 if (EnableJVMCI) {
2959 pad += 1000; // Increase the buffer size when compiling for JVMCI
2960 }
2961 #endif
2962 #ifdef _LP64
2963 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
2964 #else
2965 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
2966 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
2967 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
2968 #endif /* _LP64 */
2969 MacroAssembler* masm = new MacroAssembler(&buffer);
2970 FloatRegister Freturn0 = F0;
2971 Register Greturn1 = G1;
2972 Register Oreturn0 = O0;
2973 Register Oreturn1 = O1;
2974 Register O2UnrollBlock = O2;
2975 Register L0deopt_mode = L0;
2976 Register G4deopt_mode = G4_scratch;
2977 int frame_size_words;
2978 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
2979 #if !defined(_LP64) && defined(COMPILER2)
2980 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
2981 #endif
2982 Label cont;
2983 
2984 OopMapSet *oop_maps = new OopMapSet();
2985 
2986 //
2987 // This is the entry point for code which is returning to a de-optimized
2988 // frame.
2989 // The steps taken by this frame are as follows:
2990 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
2991 // and all potentially live registers (at a pollpoint many registers can be live).
2992 //
2993 // - call the C routine: Deoptimization::fetch_unroll_info (this function
2994 // returns information about the number and size of interpreter frames
2995 // which are equivalent to the frame which is being deoptimized)
2996 // - deallocate the unpack frame, restoring only result values. Other
2997 // volatile registers will now be captured in the vframeArray as needed.

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location), so the
  // deoptimizing pc is now in O7. Return values are still in the expected places.

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);


#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    masm->block_comment("BEGIN implicit_exception_uncommon_trap");
    implicit_exception_uncommon_trap_offset = __ offset() - start;

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ add(O7, -8, O7);

    uncommon_trap_offset = __ offset() - start;

    // Save everything in sight.
    (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
    __ set_last_Java_frame(SP, NULL);

    __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
    __ sub(G0, 1, L1);
    __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));

    __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
    __ mov(G2_thread, O0);
    __ mov(L0deopt_mode, O2);
    __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
    __ delayed()->nop();
    oop_maps->add_gc_map(__ offset() - start, map->deep_copy());
    __ get_thread();
    __ add(O7, 8, O7);
    __ reset_last_Java_frame();

    __ ba(after_fetch_unroll_info_call);
    __ delayed()->nop(); // Delay slot
    masm->block_comment("END implicit_exception_uncommon_trap");
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server). O0 contains the exception oop and O7 contains the original
  // exception pc. So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call, so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled the same way except
  // for where the pending exception is kept.
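  // (Why TLS: the oop parked in JavaThread::exception_oop survives the
  //  register save/restore and the runtime call below; the Unpack_exception
  //  path reloads it into Oexception after fetch_unroll_info returns -- see
  //  the noException sequence further down.)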
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update the oop_map, as each call to save_live_registers will produce an identical oopmap.
  // Opens a new stack frame.
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif
  // No need to update the oop_map, as each call to save_live_registers will produce an identical oopmap.
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ mov(L0deopt_mode, O1);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map(__ offset() - start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif
  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers,
  // so this move will survive.

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
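
  // For reference, the UnrollBlock fields consumed on this path (all read via
  // the *_offset_in_bytes() accessors used here and in make_new_frames):
  // number_of_frames, frame_sizes, frame_pcs, total_frame_sizes,
  // caller_adjustment and unpack_kind.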

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  // 32-bit 1-register longs return longs in G1
  __ stx(Greturn1, saved_Greturn1_addr);
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
  Label not_long;
  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  __ ldd(saved_Greturn1_addr, I0);
  __ bind(not_long);
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}
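
// A note for orientation (not new behavior): the offsets recorded above give
// the DeoptimizationBlob its entry points; elsewhere in the VM they are
// exposed through accessors such as unpack(), unpack_with_exception(),
// unpack_with_exception_in_tls() and unpack_with_reexecution().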

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // a vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame, taking care to preserve the UnrollBlock pointer
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

#ifdef ASSERT
  { Label L;
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);
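
  // Note: deopt == false here, so gen_new_frame does not propagate
  // Oreturn0/Oreturn1 from frame to frame; an uncommon trap has no
  // return values to preserve.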

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code.  On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally.  Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.  A simple 'save' turns the %o's into %i's,
// and an interrupt will chop off their heads.  Making space in the caller's
// frame first will let us save the 64-bit %o's before save'ing, but we cannot
// hand the adjusted FP off to the GC stack-crawler: this will modify the
// caller's SP and mess up HIS OopMaps.  So we first adjust the caller's SP,
// then save the 64-bit %o's, then do a save, then fixup the caller's SP
// (our FP).  Tricky, tricky, tricky...

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that the frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // Call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();
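
  // (For orientation, an assumption rather than something this file pins
  //  down: call_ptr is typically a safepoint handler such as
  //  SafepointSynchronize::handle_polling_page_exception, wired up where the
  //  SafepointBlobs are created in shared runtime code.)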

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the safepoint blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any GC of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // Call into the runtime to resolve the call target.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to, assuming no exception got installed
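
  // (For orientation, an assumption rather than something this file pins
  //  down: typical destinations are the shared resolver entry points, e.g.
  //  SharedRuntime::resolve_static_call_C, resolve_virtual_call_C and
  //  resolve_opt_virtual_call_C, paired with this blob where the resolve
  //  stubs are created in shared runtime code.)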

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}