/*
 * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a call,
  // or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant
    // (see the note after this enum).
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };
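
  // Note on the rounding above: round_to() cannot be used in a constant
  // expression, so start_of_extra_save_area rounds call_args_area up to the
  // next multiple of 8 by hand.  For example, (52 + 7) & ~7 == 56, while an
  // already aligned value is unchanged: (48 + 7) & ~7 == 48.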


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers
  int offset = d00_offset;
  for( int i=0; i<64; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    if (true) {
      map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    }
    offset += sizeof(double);
  }
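
  // The loop above stores the 32 even-numbered double registers (D0..D62,
  // covering all 64 single-precision halves) at 8 bytes apiece, 256 bytes in
  // all, which is exactly the 8*32 accounted for in register_save_size.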

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<64; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
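
// For example, with VMRegImpl::stack_slot_size == 4, a VMReg whose
// reg2stack() index is 3 maps to byte offset (3 + out_preserve_stack_slots()) * 4
// from the window top; the out_preserve_stack_slots() term skips past the
// ABI-reserved area mentioned above.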

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or it is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (i.e., never
// regs[].first() == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()), nor
// unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt   = 0;
  int flt_reg_cnt   = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                            stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
        if (int_reg < int_reg_max) {
          Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
          regs[i].set2(r->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else
#ifdef COMPILER2
        // For 32-bit build, can't pass longs in O-regs because they become
        // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
        // spare and available.  This convention isn't used by the Sparc ABI or
        // anywhere else. If we're tiered then we don't use G-regs because c1
        // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
        // G0: zero
        // G1: 1st Long arg
        // G2: global allocated to TLS
        // G3: used in inline cache check
        // G4: 2nd Long arg
        // G5: used in inline cache check
        // G6: used by OS
        // G7: used by OS

        if (g_reg == G1) {
          regs[i].set2(G1->as_VMReg()); // This long arg in G1
          g_reg = G4;                  // Where the next arg goes
        } else if (g_reg == G4) {
          regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
          g_reg = noreg;               // No more longs in registers
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else // COMPILER2
        if (int_reg_pairs + 1 < int_reg_max) {
          if (is_outgoing) {
            regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
          } else {
            regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
          }
          int_reg_pairs += 2;
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(    VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}
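
// Illustrative example (64-bit build, incoming args): for a signature
// (Object, int, long), total_args_passed is 4 (the long's trailing T_VOID
// half included) and the layout loop above produces regs[0] = I0 (set2),
// regs[1] = I1 (set1), regs[2] = I2 (set2) and regs[3] = bad; no stack
// slots are needed, so the function returns 0.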

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();
  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  }

  int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
  int next_tag_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the 13-bit signed displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  RegisterOrConstant tag_slot(const int st_off);
  RegisterOrConstant next_tag_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};
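
// Terminology reminder: the c2i adapter unpacks arguments laid out in the
// compiled (register-heavy) convention into the interpreter's all-on-the-stack
// layout, while the i2c adapter does the reverse, hoisting interpreter stack
// arguments into the compiled-code convention.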


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live G-regs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                 Register scratch) {
  if (TaggedStackInterpreter) {
    RegisterOrConstant slot = tag_slot(st_off);
    // have to store zero because local slots can be reused (rats!)
    if (t == frame::TagValue) {
      __ st_ptr(G0, base, slot);
    } else if (t == frame::TagCategory2) {
      __ st_ptr(G0, base, slot);
      __ st_ptr(G0, base, next_tag_slot(st_off));
    } else {
      __ mov(t, scratch);
      __ st_ptr(scratch, base, slot);
    }
  }
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
  RegisterOrConstant roc(tag_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
  RegisterOrConstant roc(next_tag_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
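
// Note: SPARC load/store displacements are signed 13-bit immediates
// (-4096..4095), so the *_slot() helpers above go through
// ensure_simm13_or_reg(), which keeps the constant when it fits and
// otherwise materializes it into Rdisp and returns the register.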


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}
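
// Note on the 32-bit split stores above: SPARC is big-endian, so the high
// word is written at next_arg_slot (the lower offset) and the low word at
// arg_slot; in the untagged interpreter layout these are adjacent words that
// together form a correctly ordered 64-bit value.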

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagReference, base, st_off, r);
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, r);
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize();
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize();

  Register base = SP;

#ifdef _LP64
  // In the 64bit build, because of wider slots and STACK_BIAS, we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as a
  // temporary displacement.
  if (! __ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
#endif // _LP64
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64
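
  // At this point the hand-off to the interpreter is fully set up: G3_scratch
  // holds the interpreter entry point, O5_savedSP tells the interpreter what
  // to restore SP to on return, and the delay slot below passes the argument
  // pointer in Gargs.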

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

#ifdef ASSERT
  {
    // On entry O5_savedSP and SP should be equal
    Label ok;
    __ cmp(O5_savedSP, SP);
    __ br(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("I5_savedSP not set");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
    if (g3_crushed) {
      // Rats load was wasted, at least it is in cache...
      __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
    }
#endif /* _LP64 */

    // 6243940 We might end up in handle_wrong_method if
    // the callee is deoptimized as we race thru here. If that
    // happens we don't want to take a safepoint because the
    // caller frame will look interpreted and arguments are now
    // "compiled" so it is much better to make this transition
    // invisible to the stack walking code. Unfortunately if
    // we try and find the callee by normal means a safepoint
    // is possible. So we stash the desired callee in the thread
    // and the vm will find it there should this case occur.
    Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
    __ st_ptr(G5_method, callee_target_addr);

    if (StressNonEntrant) {
      // Open a big window for deopt failure
      __ save_frame(0);
      __ mov(G0, L0);
      Label loop;
      __ bind(loop);
      __ sub(L0, 1, L0);
      __ br_null(L0, false, Assembler::pt, loop);
      __ delayed()->nop();

      __ restore();
    }


    __ jmpl(G3, 0, G0);
    __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp   = L0;   // another scratch register
#else
    Register R_temp   = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);

}
1265 
1266 // Helper function for native calling conventions
1267 static VMReg int_stk_helper( int i ) {
1268   // Bias any stack based VMReg we get by ignoring the window area
1269   // but not the register parameter save area.
1270   //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias
  // amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!
1283 
1284   // Another way of cleaning this up would be for out_preserve_stack_slots
1285   // to take a parameter to say whether it was C or java calling conventions.
1286   // Then things might look a little better (but not much).
1287 
1288   int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1289   if( mem_parm_offset < 0 ) {
1290     return as_oRegister(i)->as_VMReg();
1291   } else {
1292     int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1293     // Now return a biased offset that will be correct when out_preserve_slots is added back in
1294     return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1295   }
1296 }
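
// A worked example of the helper above (illustrative only; the actual
// constants come from the platform frame layout): with
// SPARC_ARGS_IN_REGS_NUM == 6,
//   int_stk_helper(0)  -> %o0's VMReg          (register argument)
//   int_stk_helper(5)  -> %o5's VMReg          (last register argument)
//   int_stk_helper(6)  -> VMRegImpl::stack2reg(
//                           (0 + frame::memory_parameter_word_sp_offset)
//                             * VMRegImpl::slots_per_word
//                           - SharedRuntime::out_preserve_stack_slots())
// i.e. the seventh argument lands in the first memory parameter word, with
// the bias discussed above already subtracted out.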
1297 
1298 
1299 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1300                                          VMRegPair *regs,
1301                                          int total_args_passed) {
1302 
1303     // Return the number of VMReg stack_slots needed for the args.
1304     // This value does not include an abi space (like register window
1305     // save area).
1306 
1307     // The native convention is V8 if !LP64
1308     // The LP64 convention is the V9 convention which is slightly more sane.
1309 
1310     // We return the amount of VMReg stack slots we need to reserve for all
1311     // the arguments NOT counting out_preserve_stack_slots. Since we always
1312     // have space for storing at least 6 registers to memory we start with that.
1313     // See int_stk_helper for a further discussion.
1314     int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
1315 
1316 #ifdef _LP64
1317     // V9 convention: All things "as-if" on double-wide stack slots.
1318     // Hoist any int/ptr/long's in the first 6 to int regs.
1319     // Hoist any flt/dbl's in the first 16 dbl regs.
1320     int j = 0;                  // Count of actual args, not HALVES
1321     for( int i=0; i<total_args_passed; i++, j++ ) {
1322       switch( sig_bt[i] ) {
1323       case T_BOOLEAN:
1324       case T_BYTE:
1325       case T_CHAR:
1326       case T_INT:
1327       case T_SHORT:
1328         regs[i].set1( int_stk_helper( j ) ); break;
1329       case T_LONG:
1330         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1331       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1332       case T_ARRAY:
1333       case T_OBJECT:
1334         regs[i].set2( int_stk_helper( j ) );
1335         break;
1336       case T_FLOAT:
1337         if ( j < 16 ) {
1338           // V9ism: floats go in ODD registers
1339           regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
1340         } else {
1341           // V9ism: floats go in ODD stack slot
1342           regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
1343         }
1344         break;
1345       case T_DOUBLE:
1346         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1347         if ( j < 16 ) {
1348           // V9ism: doubles go in EVEN/ODD regs
1349           regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
1350         } else {
1351           // V9ism: doubles go in EVEN/ODD stack slots
1352           regs[i].set2(VMRegImpl::stack2reg(j<<1));
1353         }
1354         break;
1355       case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
1356       default:
1357         ShouldNotReachHere();
1358       }
1359       if (regs[i].first()->is_stack()) {
1360         int off =  regs[i].first()->reg2stack();
1361         if (off > max_stack_slots) max_stack_slots = off;
1362       }
1363       if (regs[i].second()->is_stack()) {
1364         int off =  regs[i].second()->reg2stack();
1365         if (off > max_stack_slots) max_stack_slots = off;
1366       }
1367     }
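
    // Illustrative walk of the V9 loop above for a hypothetical C signature
    // (jint, jdouble, jfloat), i.e. sig_bt = {T_INT, T_DOUBLE, T_VOID, T_FLOAT}:
    //   jint    (j==0) -> int_stk_helper(0)            == %o0
    //   jdouble (j==1) -> as_FloatRegister(1 << 1)     == F2/F3 (EVEN/ODD pair)
    //   jfloat  (j==2) -> as_FloatRegister(1 + (2<<1)) == F5    (ODD single)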
1368 
1369 #else // _LP64
1370     // V8 convention: first 6 things in O-regs, rest on stack.
1371     // Alignment is willy-nilly.
1372     for( int i=0; i<total_args_passed; i++ ) {
1373       switch( sig_bt[i] ) {
1374       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1375       case T_ARRAY:
1376       case T_BOOLEAN:
1377       case T_BYTE:
1378       case T_CHAR:
1379       case T_FLOAT:
1380       case T_INT:
1381       case T_OBJECT:
1382       case T_SHORT:
1383         regs[i].set1( int_stk_helper( i ) );
1384         break;
1385       case T_DOUBLE:
1386       case T_LONG:
1387         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1388         regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
1389         break;
1390       case T_VOID: regs[i].set_bad(); break;
1391       default:
1392         ShouldNotReachHere();
1393       }
1394       if (regs[i].first()->is_stack()) {
1395         int off =  regs[i].first()->reg2stack();
1396         if (off > max_stack_slots) max_stack_slots = off;
1397       }
1398       if (regs[i].second()->is_stack()) {
1399         int off =  regs[i].second()->reg2stack();
1400         if (off > max_stack_slots) max_stack_slots = off;
1401       }
1402     }
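
    // Illustrative walk of the V8 loop above for a hypothetical signature
    // (jint, jlong, jint), i.e. sig_bt = {T_INT, T_LONG, T_VOID, T_INT}:
    //   jint  -> int_stk_helper(0)                              == %o0
    //   jlong -> set_pair(int_stk_helper(2), int_stk_helper(1)), occupying %o1/%o2
    //   jint  -> int_stk_helper(3)                              == %o3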
1403 #endif // _LP64
1404 
1405   return round_to(max_stack_slots + 1, 2);
1406 
1407 }
1408 
1409 
1410 // ---------------------------------------------------------------------------
1411 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1412   switch (ret_type) {
1413   case T_FLOAT:
1414     __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1415     break;
1416   case T_DOUBLE:
1417     __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1418     break;
1419   }
1420 }
1421 
1422 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1423   switch (ret_type) {
1424   case T_FLOAT:
1425     __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1426     break;
1427   case T_DOUBLE:
1428     __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1429     break;
1430   }
1431 }
1432 
// Check and forward any pending exception.  Thread is stored in
1434 // L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
1435 // is no exception handler.  We merely pop this frame off and throw the
1436 // exception in the caller's frame.
1437 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1438   Label L;
1439   __ br_null(Rex_oop, false, Assembler::pt, L);
1440   __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1441   // Since this is a native call, we *know* the proper exception handler
1442   // without calling into the VM: it's the empty function.  Just pop this
1443   // frame and then jump to forward_exception_entry; O7 will contain the
1444   // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1446   __ jump_to(exception_entry, G3_scratch);
1447   __ delayed()->restore();      // Pop this frame off.
1448   __ bind(L);
1449 }
1450 
// A simple move of an integer-like type
1452 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1453   if (src.first()->is_stack()) {
1454     if (dst.first()->is_stack()) {
1455       // stack to stack
1456       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1457       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1458     } else {
1459       // stack to reg
1460       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1461     }
1462   } else if (dst.first()->is_stack()) {
1463     // reg to stack
1464     __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1465   } else {
1466     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1467   }
1468 }
1469 
// On 64 bit we will store integer like items to the stack as
// 64 bit items (sparc abi) even though java would only store
// 32 bits for a parameter.  On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
1474 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1475   if (src.first()->is_stack()) {
1476     if (dst.first()->is_stack()) {
1477       // stack to stack
1478       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1479       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1480     } else {
1481       // stack to reg
1482       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1483     }
1484   } else if (dst.first()->is_stack()) {
1485     // reg to stack
1486     __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1487   } else {
1488     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1489   }
1490 }
1491 
1492 
1493 // An oop arg. Must pass a handle not the oop itself
1494 static void object_move(MacroAssembler* masm,
1495                         OopMap* map,
1496                         int oop_handle_offset,
1497                         int framesize_in_slots,
1498                         VMRegPair src,
1499                         VMRegPair dst,
1500                         bool is_receiver,
1501                         int* receiver_offset) {
1502 
1503   // must pass a handle. First figure out the location we use as a handle
1504 
1505   if (src.first()->is_stack()) {
1506     // Oop is already on the stack
1507     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1508     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1509     __ ld_ptr(rHandle, 0, L4);
1510 #ifdef _LP64
1511     __ movr( Assembler::rc_z, L4, G0, rHandle );
1512 #else
1513     __ tst( L4 );
1514     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1515 #endif
1516     if (dst.first()->is_stack()) {
1517       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1518     }
1519     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1520     if (is_receiver) {
1521       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1522     }
1523     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1524   } else {
    // Oop is in an input register; we must flush it to the stack
1526     const Register rOop = src.first()->as_Register();
1527     const Register rHandle = L5;
1528     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1529     int offset = oop_slot*VMRegImpl::stack_slot_size;
1530     Label skip;
1531     __ st_ptr(rOop, SP, offset + STACK_BIAS);
1532     if (is_receiver) {
1533       *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1534     }
1535     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1536     __ add(SP, offset + STACK_BIAS, rHandle);
1537 #ifdef _LP64
1538     __ movr( Assembler::rc_z, rOop, G0, rHandle );
1539 #else
1540     __ tst( rOop );
1541     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1542 #endif
1543 
1544     if (dst.first()->is_stack()) {
1545       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1546     } else {
1547       __ mov(rHandle, dst.first()->as_Register());
1548     }
1549   }
1550 }
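
// For instance (illustrative): a non-null oop arriving in %i1 is flushed to
// its slot in the oop handle area and the native argument becomes the
// address of that slot, while a NULL oop yields a NULL handle via the
// conditional move (movr/movcc) above; JNI expects NULL references to
// arrive as NULL jobject handles, not as pointers to a stored NULL.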
1551 
// A float arg may have to do float reg to int reg conversion
1553 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1554   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1555 
1556   if (src.first()->is_stack()) {
1557     if (dst.first()->is_stack()) {
1558       // stack to stack the easiest of the bunch
1559       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1560       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1561     } else {
1562       // stack to reg
1563       if (dst.first()->is_Register()) {
1564         __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1565       } else {
1566         __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1567       }
1568     }
1569   } else if (dst.first()->is_stack()) {
1570     // reg to stack
1571     if (src.first()->is_Register()) {
1572       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1573     } else {
1574       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1575     }
1576   } else {
1577     // reg to reg
1578     if (src.first()->is_Register()) {
1579       if (dst.first()->is_Register()) {
1580         // gpr -> gpr
1581         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1582       } else {
1583         // gpr -> fpr
1584         __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1585         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1586       }
1587     } else if (dst.first()->is_Register()) {
1588       // fpr -> gpr
1589       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1590       __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1591     } else {
1592       // fpr -> fpr
1593       // In theory these overlap but the ordering is such that this is likely a nop
1594       if ( src.first() != dst.first()) {
1595         __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1596       }
1597     }
1598   }
1599 }
1600 
1601 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1602   VMRegPair src_lo(src.first());
1603   VMRegPair src_hi(src.second());
1604   VMRegPair dst_lo(dst.first());
1605   VMRegPair dst_hi(dst.second());
1606   simple_move32(masm, src_lo, dst_lo);
1607   simple_move32(masm, src_hi, dst_hi);
1608 }
1609 
1610 // A long move
1611 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1612 
1613   // Do the simple ones here else do two int moves
1614   if (src.is_single_phys_reg() ) {
1615     if (dst.is_single_phys_reg()) {
1616       __ mov(src.first()->as_Register(), dst.first()->as_Register());
1617     } else {
1618       // split src into two separate registers
1619       // Remember hi means hi address or lsw on sparc
1620       // Move msw to lsw
1621       if (dst.second()->is_reg()) {
1622         // MSW -> MSW
1623         __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1624         // Now LSW -> LSW
1625         // this will only move lo -> lo and ignore hi
1626         VMRegPair split(dst.second());
1627         simple_move32(masm, src, split);
1628       } else {
1629         VMRegPair split(src.first(), L4->as_VMReg());
1630         // MSW -> MSW (lo ie. first word)
1631         __ srax(src.first()->as_Register(), 32, L4);
1632         split_long_move(masm, split, dst);
1633       }
1634     }
1635   } else if (dst.is_single_phys_reg()) {
1636     if (src.is_adjacent_aligned_on_stack(2)) {
1637       __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1638     } else {
1639       // dst is a single reg.
1640       // Remember lo is low address not msb for stack slots
1641       // and lo is the "real" register for registers
      // src is either a register pair or a pair of stack slots
1643 
1644       VMRegPair split;
1645 
1646       if (src.first()->is_reg()) {
1647         // src.lo (msw) is a reg, src.hi is stk/reg
1648         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1649         split.set_pair(dst.first(), src.first());
1650       } else {
1651         // msw is stack move to L5
1652         // lsw is stack move to dst.lo (real reg)
1653         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1654         split.set_pair(dst.first(), L5->as_VMReg());
1655       }
1656 
1657       // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1658       // msw   -> src.lo/L5,  lsw -> dst.lo
1659       split_long_move(masm, src, split);
1660 
      // dst now has the low-order word in the correct position;
      // now shift the msw half into place
1663       __ sllx(split.first()->as_Register(), 32, L5);
1664 
1665       const Register d = dst.first()->as_Register();
1666       __ or3(L5, d, d);
1667     }
1668   } else {
1669     // For LP64 we can probably do better.
1670     split_long_move(masm, src, dst);
1671   }
1672 }
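
// For example (illustrative): with the 64-bit value 0xAABBCCDD11223344 in a
// single src register and a v8-style destination pair, the srax above leaves
// the MSW 0xAABBCCDD in dst.first() and the simple_move32 path delivers the
// LSW 0x11223344 to dst.second().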
1673 
1674 // A double move
1675 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1676 
1677   // The painful thing here is that like long_move a VMRegPair might be
1678   // 1: a single physical register
1679   // 2: two physical registers (v8)
1680   // 3: a physical reg [lo] and a stack slot [hi] (v8)
1681   // 4: two stack slots
1682 
1683   // Since src is always a java calling convention we know that the src pair
1684   // is always either all registers or all stack (and aligned?)
1685 
1686   // in a register [lo] and a stack slot [hi]
1687   if (src.first()->is_stack()) {
1688     if (dst.first()->is_stack()) {
1689       // stack to stack the easiest of the bunch
1690       // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1691       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1692       __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1693       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1694       __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1695     } else {
1696       // stack to reg
1697       if (dst.second()->is_stack()) {
1698         // stack -> reg, stack -> stack
1699         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1700         if (dst.first()->is_Register()) {
1701           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1702         } else {
1703           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1704         }
1705         // This was missing. (very rare case)
1706         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1707       } else {
1708         // stack -> reg
1709         // Eventually optimize for alignment QQQ
1710         if (dst.first()->is_Register()) {
1711           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1712           __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1713         } else {
1714           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1715           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1716         }
1717       }
1718     }
1719   } else if (dst.first()->is_stack()) {
1720     // reg to stack
1721     if (src.first()->is_Register()) {
1722       // Eventually optimize for alignment QQQ
1723       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1724       if (src.second()->is_stack()) {
1725         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1726         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1727       } else {
1728         __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1729       }
1730     } else {
1731       // fpr to stack
1732       if (src.second()->is_stack()) {
1733         ShouldNotReachHere();
1734       } else {
1735         // Is the stack aligned?
1736         if (reg2offset(dst.first()) & 0x7) {
          // No, do as pairs
1738           __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1739           __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1740         } else {
1741           __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1742         }
1743       }
1744     }
1745   } else {
1746     // reg to reg
1747     if (src.first()->is_Register()) {
1748       if (dst.first()->is_Register()) {
1749         // gpr -> gpr
1750         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1751         __ mov(src.second()->as_Register(), dst.second()->as_Register());
1752       } else {
1753         // gpr -> fpr
1754         // ought to be able to do a single store
        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1757         // ought to be able to do a single load
1758         __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1759         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1760       }
1761     } else if (dst.first()->is_Register()) {
1762       // fpr -> gpr
1763       // ought to be able to do a single store
1764       __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1765       // ought to be able to do a single load
1766       // REMEMBER first() is low address not LSB
1767       __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1768       if (dst.second()->is_Register()) {
1769         __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1770       } else {
1771         __ ld(FP, -4 + STACK_BIAS, L4);
1772         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1773       }
1774     } else {
1775       // fpr -> fpr
1776       // In theory these overlap but the ordering is such that this is likely a nop
1777       if ( src.first() != dst.first()) {
1778         __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1779       }
1780     }
1781   }
1782 }
1783 
1784 // Creates an inner frame if one hasn't already been created, and
1785 // saves a copy of the thread in L7_thread_cache
1786 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1787   if (!*already_created) {
1788     __ save_frame(0);
1789     // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1790     // Don't use save_thread because it smashes G2 and we merely want to save a
1791     // copy
1792     __ mov(G2_thread, L7_thread_cache);
1793     *already_created = true;
1794   }
1795 }
1796 
1797 // ---------------------------------------------------------------------------
1798 // Generate a native wrapper for a given method.  The method takes arguments
1799 // in the Java compiled code convention, marshals them to the native
1800 // convention (handlizes oops, etc), transitions to native, makes the call,
1801 // returns to java state (possibly blocking), unhandlizes any result and
1802 // returns.
1803 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1804                                                 methodHandle method,
1805                                                 int total_in_args,
1806                                                 int comp_args_on_stack, // in VMRegStackSlots
1807                                                 BasicType *in_sig_bt,
1808                                                 VMRegPair *in_regs,
1809                                                 BasicType ret_type) {
1810 
  // Native nmethod wrappers never take possession of the oop arguments.
1812   // So the caller will gc the arguments. The only thing we need an
1813   // oopMap for is if the call is static
1814   //
1815   // An OopMap for lock (and class if static), and one for the VM call itself
1816   OopMapSet *oop_maps = new OopMapSet();
1817   intptr_t start = (intptr_t)__ pc();
1818 
1819   // First thing make an ic check to see if we should even be here
1820   {
1821     Label L;
1822     const Register temp_reg = G3_scratch;
1823     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1824     __ verify_oop(O0);
1825     __ load_klass(O0, temp_reg);
1826     __ cmp(temp_reg, G5_inline_cache_reg);
1827     __ brx(Assembler::equal, true, Assembler::pt, L);
1828     __ delayed()->nop();
1829 
1830     __ jump_to(ic_miss, temp_reg);
1831     __ delayed()->nop();
1832     __ align(CodeEntryAlignment);
1833     __ bind(L);
1834   }
1835 
1836   int vep_offset = ((intptr_t)__ pc()) - start;
1837 
1838 #ifdef COMPILER1
1839   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1840     // Object.hashCode can pull the hashCode from the header word
1841     // instead of doing a full VM transition once it's been computed.
1842     // Since hashCode is usually polymorphic at call sites we can't do
1843     // this optimization at the call site without a lot of work.
1844     Label slowCase;
1845     Register receiver             = O0;
1846     Register result               = O0;
1847     Register header               = G3_scratch;
1848     Register hash                 = G3_scratch; // overwrite header value with hash value
1849     Register mask                 = G1;         // to get hash field from header
1850 
1851     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
1852     // We depend on hash_mask being at most 32 bits and avoid the use of
1853     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1854     // vm: see markOop.hpp.
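    // Roughly, on a 32-bit VM the mark word of an unlocked object looks like
    // (see markOop.hpp for the authoritative layout):
    //   [ hash:25 | age:4 | biased_lock:1 | lock:2 ]
    // so hash_shift brings the hash field down to bit 0 and the sethi/or3
    // pair below materializes hash_mask as the extraction mask.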
1855     __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1856     __ sethi(markOopDesc::hash_mask, mask);
1857     __ btst(markOopDesc::unlocked_value, header);
1858     __ br(Assembler::zero, false, Assembler::pn, slowCase);
1859     if (UseBiasedLocking) {
1860       // Check if biased and fall through to runtime if so
1861       __ delayed()->nop();
1862       __ btst(markOopDesc::biased_lock_bit_in_place, header);
1863       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1864     }
1865     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1866 
1867     // Check for a valid (non-zero) hash code and get its value.
1868 #ifdef _LP64
1869     __ srlx(header, markOopDesc::hash_shift, hash);
1870 #else
1871     __ srl(header, markOopDesc::hash_shift, hash);
1872 #endif
1873     __ andcc(hash, mask, hash);
1874     __ br(Assembler::equal, false, Assembler::pn, slowCase);
1875     __ delayed()->nop();
1876 
1877     // leaf return.
1878     __ retl();
1879     __ delayed()->mov(hash, result);
1880     __ bind(slowCase);
1881   }
1882 #endif // COMPILER1
1883 
1884 
  // We have received a description of where all the java args are located
1886   // on entry to the wrapper. We need to convert these args to where
1887   // the jni function will expect them. To figure out where they go
1888   // we convert the java signature to a C signature by inserting
1889   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1890 
1891   int total_c_args = total_in_args + 1;
1892   if (method->is_static()) {
1893     total_c_args++;
1894   }
1895 
1896   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1898 
1899   int argc = 0;
1900   out_sig_bt[argc++] = T_ADDRESS;
1901   if (method->is_static()) {
1902     out_sig_bt[argc++] = T_OBJECT;
1903   }
1904 
1905   for (int i = 0; i < total_in_args ; i++ ) {
1906     out_sig_bt[argc++] = in_sig_bt[i];
1907   }
1908 
1909   // Now figure out where the args must be stored and how much stack space
1910   // they require (neglecting out_preserve_stack_slots but space for storing
  // the 1st six register arguments). It's weird; see int_stk_helper.
1912   //
1913   int out_arg_slots;
1914   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1915 
1916   // Compute framesize for the wrapper.  We need to handlize all oops in
1917   // registers. We must create space for them here that is disjoint from
1918   // the windowed save area because we have no control over when we might
1919   // flush the window again and overwrite values that gc has since modified.
1920   // (The live window race)
1921   //
  // We always just allocate 6 words for storing down these objects. This allows
  // us to simply record the base and use the Ireg number to decide which
  // slot to use. (Note that the reg number is the inbound number, not the
  // outbound number).
1926   // We must shuffle args to match the native convention, and include var-args space.
1927 
1928   // Calculate the total number of stack slots we will need.
1929 
1930   // First count the abi requirement plus all of the outgoing args
1931   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1932 
1933   // Now the space for the inbound oop handle area
1934 
1935   int oop_handle_offset = stack_slots;
1936   stack_slots += 6*VMRegImpl::slots_per_word;
1937 
1938   // Now any space we need for handlizing a klass if static method
1939 
1940   int oop_temp_slot_offset = 0;
1941   int klass_slot_offset = 0;
1942   int klass_offset = -1;
1943   int lock_slot_offset = 0;
1944   bool is_static = false;
1945 
1946   if (method->is_static()) {
1947     klass_slot_offset = stack_slots;
1948     stack_slots += VMRegImpl::slots_per_word;
1949     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1950     is_static = true;
1951   }
1952 
1953   // Plus a lock if needed
1954 
1955   if (method->is_synchronized()) {
1956     lock_slot_offset = stack_slots;
1957     stack_slots += VMRegImpl::slots_per_word;
1958   }
1959 
1960   // Now a place to save return value or as a temporary for any gpr -> fpr moves
1961   stack_slots += 2;
1962 
  // Ok, the space we have allocated will look like:
1964   //
1965   //
1966   // FP-> |                     |
1967   //      |---------------------|
1968   //      | 2 slots for moves   |
1969   //      |---------------------|
1970   //      | lock box (if sync)  |
1971   //      |---------------------| <- lock_slot_offset
1972   //      | klass (if static)   |
1973   //      |---------------------| <- klass_slot_offset
1974   //      | oopHandle area      |
1975   //      |---------------------| <- oop_handle_offset
1976   //      | outbound memory     |
1977   //      | based arguments     |
1978   //      |                     |
1979   //      |---------------------|
1980   //      | vararg area         |
1981   //      |---------------------|
1982   //      |                     |
1983   // SP-> | out_preserved_slots |
1984   //
1985   //
1986 
1987 
1988   // Now compute actual number of stack words we need rounding to make
1989   // stack properly aligned.
1990   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1991 
1992   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
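
  // Putting the pieces together (symbolic sketch; the actual numbers depend
  // on the method): for a non-static, non-synchronized native,
  //   stack_slots = round_to(out_preserve_stack_slots() + out_arg_slots
  //                          + 6*slots_per_word   /* oop handle area   */
  //                          + 2,                 /* result/move temps */
  //                          2*slots_per_word)
  // and stack_size is simply that scaled by stack_slot_size.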
1993 
1994   // Generate stack overflow check before creating frame
1995   __ generate_stack_overflow_check(stack_size);
1996 
1997   // Generate a new frame for the wrapper.
1998   __ save(SP, -stack_size, SP);
1999 
2000   int frame_complete = ((intptr_t)__ pc()) - start;
2001 
2002   __ verify_thread();
2003 
2004 
2005   //
2006   // We immediately shuffle the arguments so that any vm call we have to
2007   // make from here on out (sync slow path, jvmti, etc.) we will have
2008   // captured the oops from our caller and have a valid oopMap for
2009   // them.
2010 
2011   // -----------------
2012   // The Grand Shuffle
2013   //
2014   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2015   // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2016   // the class mirror instead of a receiver.  This pretty much guarantees that
2017   // register layout will not match.  We ignore these extra arguments during
2018   // the shuffle. The shuffle is described by the two calling convention
2019   // vectors we have in our possession. We simply walk the java vector to
2020   // get the source locations and the c vector to get the destinations.
2021   // Because we have a new window and the argument registers are completely
2022   // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2023   // here.
2024 
2025   // This is a trick. We double the stack slots so we can claim
2026   // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
2028   // sure we can capture all the incoming oop args from the
2029   // caller.
2030   //
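  // Walking an example (hypothetical non-static method 'void m(long j)'):
  //   in  (java): [ receiver:T_OBJECT, j:T_LONG, T_VOID ]
  //   out (C):    [ JNIEnv*:T_ADDRESS, receiver:T_OBJECT, j:T_LONG, T_VOID ]
  // The backward walk pairs in[2]/out[3] (the voids), in[1]/out[2] (the
  // long) and in[0]/out[1] (the receiver); out[0], the JNIEnv*, is filled
  // in just before the call.
  //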
2031   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2032   int c_arg = total_c_args - 1;
2033   // Record sp-based slot for receiver on stack for non-static methods
2034   int receiver_offset = -1;
2035 
  // We move the arguments backward because the floating point destination
  // will always be a register with a greater or equal register number,
  // or the stack.
2039 
2040 #ifdef ASSERT
2041   bool reg_destroyed[RegisterImpl::number_of_registers];
2042   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2043   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2044     reg_destroyed[r] = false;
2045   }
2046   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2047     freg_destroyed[f] = false;
2048   }
2049 
2050 #endif /* ASSERT */
2051 
2052   for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2053 
2054 #ifdef ASSERT
2055     if (in_regs[i].first()->is_Register()) {
2056       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2057     } else if (in_regs[i].first()->is_FloatRegister()) {
2058       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2059     }
2060     if (out_regs[c_arg].first()->is_Register()) {
2061       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2062     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2063       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2064     }
2065 #endif /* ASSERT */
2066 
2067     switch (in_sig_bt[i]) {
2068       case T_ARRAY:
2069       case T_OBJECT:
2070         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2071                     ((i == 0) && (!is_static)),
2072                     &receiver_offset);
2073         break;
2074       case T_VOID:
2075         break;
2076 
2077       case T_FLOAT:
2078         float_move(masm, in_regs[i], out_regs[c_arg]);
        break;
2080 
2081       case T_DOUBLE:
2082         assert( i + 1 < total_in_args &&
2083                 in_sig_bt[i + 1] == T_VOID &&
2084                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2085         double_move(masm, in_regs[i], out_regs[c_arg]);
2086         break;
2087 
2088       case T_LONG :
2089         long_move(masm, in_regs[i], out_regs[c_arg]);
2090         break;
2091 
2092       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2093 
2094       default:
2095         move32_64(masm, in_regs[i], out_regs[c_arg]);
2096     }
2097   }
2098 
2099   // Pre-load a static method's oop into O1.  Used both by locking code and
2100   // the normal JNI call code.
2101   if (method->is_static()) {
2102     __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2103 
2104     // Now handlize the static class mirror in O1.  It's known not-null.
2105     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2106     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2107     __ add(SP, klass_offset + STACK_BIAS, O1);
2108   }
2109 
2110 
2111   const Register L6_handle = L6;
2112 
2113   if (method->is_synchronized()) {
2114     __ mov(O1, L6_handle);
2115   }
2116 
2117   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
2118   // except O6/O7. So if we must call out we must push a new frame. We immediately
2119   // push a new frame and flush the windows.
2120 
2121 #ifdef _LP64
2122   intptr_t thepc = (intptr_t) __ pc();
2123   {
2124     address here = __ pc();
2125     // Call the next instruction
2126     __ call(here + 8, relocInfo::none);
2127     __ delayed()->nop();
2128   }
2129 #else
2130   intptr_t thepc = __ load_pc_address(O7, 0);
2131 #endif /* _LP64 */
2132 
2133   // We use the same pc/oopMap repeatedly when we call out
2134   oop_maps->add_gc_map(thepc - start, map);
2135 
2136   // O7 now has the pc loaded that we will use when we finally call to native.
2137 
2138   // Save thread in L7; it crosses a bunch of VM calls below
2139   // Don't use save_thread because it smashes G2 and we merely
2140   // want to save a copy
2141   __ mov(G2_thread, L7_thread_cache);
2142 
2143 
  // If we create an inner frame, once is plenty;
  // when we create it we must also save G2_thread
2146   bool inner_frame_created = false;
2147 
2148   // dtrace method entry support
2149   {
2150     SkipIfEqual skip_if(
2151       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2152     // create inner frame
2153     __ save_frame(0);
2154     __ mov(G2_thread, L7_thread_cache);
2155     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2156     __ call_VM_leaf(L7_thread_cache,
2157          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2158          G2_thread, O1);
2159     __ restore();
2160   }
2161 
2162   // RedefineClasses() tracing support for obsolete method entry
2163   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2164     // create inner frame
2165     __ save_frame(0);
2166     __ mov(G2_thread, L7_thread_cache);
2167     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2168     __ call_VM_leaf(L7_thread_cache,
2169          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2170          G2_thread, O1);
2171     __ restore();
2172   }
2173 
  // We are in the jni frame unless saved_frame is true, in which case
  // we are one frame deeper (the "inner" frame). If we are in the
  // "inner" frame the args are in the Iregs; in the jni frame
  // they are in the Oregs.
2178   // If we ever need to go to the VM (for locking, jvmti) then
2179   // we will always be in the "inner" frame.
2180 
2181   // Lock a synchronized method
2182   int lock_offset = -1;         // Set if locked
2183   if (method->is_synchronized()) {
2184     Register Roop = O1;
2185     const Register L3_box = L3;
2186 
2187     create_inner_frame(masm, &inner_frame_created);
2188 
2189     __ ld_ptr(I1, 0, O1);
2190     Label done;
2191 
2192     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2193     __ add(FP, lock_offset+STACK_BIAS, L3_box);
2194 #ifdef ASSERT
2195     if (UseBiasedLocking) {
2196       // making the box point to itself will make it clear it went unused
2197       // but also be obviously invalid
2198       __ st_ptr(L3_box, L3_box, 0);
2199     }
2200 #endif // ASSERT
2201     //
2202     // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2203     //
2204     __ compiler_lock_object(Roop, L1,    L3_box, L2);
2205     __ br(Assembler::equal, false, Assembler::pt, done);
2206     __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2207 
2208 
2209     // None of the above fast optimizations worked so we have to get into the
2210     // slow case of monitor enter.  Inline a special case of call_VM that
2211     // disallows any pending_exception.
2212     __ mov(Roop, O0);            // Need oop in O0
2213     __ mov(L3_box, O1);
2214 
2215     // Record last_Java_sp, in case the VM code releases the JVM lock.
2216 
2217     __ set_last_Java_frame(FP, I7);
2218 
2219     // do the call
2220     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2221     __ delayed()->mov(L7_thread_cache, O2);
2222 
2223     __ restore_thread(L7_thread_cache); // restore G2_thread
2224     __ reset_last_Java_frame();
2225 
2226 #ifdef ASSERT
2227     { Label L;
2228     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2229     __ br_null(O0, false, Assembler::pt, L);
2230     __ delayed()->nop();
2231     __ stop("no pending exception allowed on exit from IR::monitorenter");
2232     __ bind(L);
2233     }
2234 #endif
2235     __ bind(done);
2236   }
2237 
2238 
2239   // Finally just about ready to make the JNI call
2240 
2241   __ flush_windows();
2242   if (inner_frame_created) {
2243     __ restore();
2244   } else {
2245     // Store only what we need from this frame
2246     // QQQ I think that non-v9 (like we care) we don't need these saves
2247     // either as the flush traps and the current window goes too.
2248     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2249     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2250   }
2251 
2252   // get JNIEnv* which is first argument to native
2253 
2254   __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2255 
2256   // Use that pc we placed in O7 a while back as the current frame anchor
2257 
2258   __ set_last_Java_frame(SP, O7);
2259 
2260   // Transition from _thread_in_Java to _thread_in_native.
2261   __ set(_thread_in_native, G3_scratch);
2262   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2263 
2264   // We flushed the windows ages ago now mark them as flushed
2265 
2266   // mark windows as flushed
2267   __ set(JavaFrameAnchor::flushed, G3_scratch);
2268 
2269   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2270 
2271 #ifdef _LP64
2272   AddressLiteral dest(method->native_function());
2273   __ relocate(relocInfo::runtime_call_type);
2274   __ jumpl_to(dest, O7, O7);
2275 #else
2276   __ call(method->native_function(), relocInfo::runtime_call_type);
2277 #endif
2278   __ delayed()->st(G3_scratch, flags);
2279 
2280   __ restore_thread(L7_thread_cache); // restore G2_thread
2281 
2282   // Unpack native results.  For int-types, we do any needed sign-extension
2283   // and move things into I0.  The return value there will survive any VM
2284   // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2285   // specially in the slow-path code.
2286   switch (ret_type) {
2287   case T_VOID:    break;        // Nothing to do!
2288   case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2289   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
2291   case T_LONG:
2292 #ifndef _LP64
2293                   __ mov(O1, I1);
2294 #endif
2295                   // Fall thru
  case T_OBJECT:                // Really a handle; cannot de-handlize until after reclaiming jvm_lock
2297   case T_ARRAY:
2298   case T_INT:
2299                   __ mov(O0, I0);
2300                   break;
2301   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2302   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2303   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2306   default:
2307     ShouldNotReachHere();
2308   }
2309 
2310   // must we block?
2311 
2312   // Block, if necessary, before resuming in _thread_in_Java state.
2313   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2314   { Label no_block;
2315     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2316 
2317     // Switch thread to "native transition" state before reading the synchronization state.
2318     // This additional state is necessary because reading and testing the synchronization
2319     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2320     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2321     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2322     //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
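    // In short, the protocol is: _thread_in_native -> _thread_in_native_trans
    // -> (block if a safepoint or suspension is pending) -> _thread_in_Java;
    // the _trans state gives the VM thread a stable point at which to
    // examine this thread.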
2324     __ set(_thread_in_native_trans, G3_scratch);
2325     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2326     if(os::is_MP()) {
2327       if (UseMembar) {
2328         // Force this write out before the read below
2329         __ membar(Assembler::StoreLoad);
2330       } else {
2331         // Write serialization page so VM thread can do a pseudo remote membar.
2332         // We use the current thread pointer to calculate a thread specific
2333         // offset to write to within the page. This minimizes bus traffic
2334         // due to cache line collision.
2335         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2336       }
2337     }
2338     __ load_contents(sync_state, G3_scratch);
2339     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2340 
2341     Label L;
2342     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2343     __ br(Assembler::notEqual, false, Assembler::pn, L);
2344     __ delayed()->ld(suspend_state, G3_scratch);
2345     __ cmp(G3_scratch, 0);
2346     __ br(Assembler::equal, false, Assembler::pt, no_block);
2347     __ delayed()->nop();
2348     __ bind(L);
2349 
2350     // Block.  Save any potential method result value before the operation and
2351     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
2353     // a distinct one for this pc
2354     //
2355     save_native_result(masm, ret_type, stack_slots);
2356     __ call_VM_leaf(L7_thread_cache,
2357                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2358                     G2_thread);
2359 
2360     // Restore any method result value
2361     restore_native_result(masm, ret_type, stack_slots);
2362     __ bind(no_block);
2363   }
2364 
2365   // thread state is thread_in_native_trans. Any safepoint blocking has already
2366   // happened so we can now change state to _thread_in_Java.
2367 
2368 
2369   __ set(_thread_in_Java, G3_scratch);
2370   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2371 
2372 
2373   Label no_reguard;
2374   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2375   __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2376   __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2377   __ delayed()->nop();
2378 
  save_native_result(masm, ret_type, stack_slots);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  __ delayed()->nop();

  __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);
2385 
2386   __ bind(no_reguard);
2387 
2388   // Handle possible exception (will unlock if necessary)
2389 
2390   // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2391 
2392   // Unlock
2393   if (method->is_synchronized()) {
2394     Label done;
2395     Register I2_ex_oop = I2;
2396     const Register L3_box = L3;
2397     // Get locked oop from the handle we passed to jni
2398     __ ld_ptr(L6_handle, 0, L4);
2399     __ add(SP, lock_offset+STACK_BIAS, L3_box);
2400     // Must save pending exception around the slow-path VM call.  Since it's a
2401     // leaf call, the pending exception (if any) can be kept in a register.
2402     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2403     // Now unlock
2404     //                       (Roop, Rmark, Rbox,   Rscratch)
2405     __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2406     __ br(Assembler::equal, false, Assembler::pt, done);
2407     __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2408 
2409     // save and restore any potential method result value around the unlocking
2410     // operation.  Will save in I0 (or stack for FP returns).
2411     save_native_result(masm, ret_type, stack_slots);
2412 
2413     // Must clear pending-exception before re-entering the VM.  Since this is
2414     // a leaf call, pending-exception-oop can be safely kept in a register.
2415     __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2416 
    // slow case of monitor exit.  Inline a special case of call_VM that
    // disallows any pending_exception.
2419     __ mov(L3_box, O1);
2420 
2421     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2422     __ delayed()->mov(L4, O0);              // Need oop in O0
2423 
2424     __ restore_thread(L7_thread_cache); // restore G2_thread
2425 
2426 #ifdef ASSERT
2427     { Label L;
2428     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2429     __ br_null(O0, false, Assembler::pt, L);
2430     __ delayed()->nop();
2431     __ stop("no pending exception allowed on exit from IR::monitorexit");
2432     __ bind(L);
2433     }
2434 #endif
2435     restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
2437     // exception is set.  The forward_exception routine expects to see the
2438     // exception in pending_exception and not in a register.  Kind of clumsy,
2439     // since all folks who branch to forward_exception must have tested
2440     // pending_exception first and hence have it in a register already.
2441     __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2442     __ bind(done);
2443   }
2444 
2445   // Tell dtrace about this method exit
2446   {
2447     SkipIfEqual skip_if(
2448       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2449     save_native_result(masm, ret_type, stack_slots);
2450     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2451     __ call_VM_leaf(L7_thread_cache,
2452        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2453        G2_thread, O1);
2454     restore_native_result(masm, ret_type, stack_slots);
2455   }
2456 
2457   // Clear "last Java frame" SP and PC.
2458   __ verify_thread(); // G2_thread must be correct
2459   __ reset_last_Java_frame();
2460 
2461   // Unpack oop result
2462   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2463       Label L;
2464       __ addcc(G0, I0, G0);
2465       __ brx(Assembler::notZero, true, Assembler::pt, L);
2466       __ delayed()->ld_ptr(I0, 0, I0);
2467       __ mov(G0, I0);
2468       __ bind(L);
2469       __ verify_oop(I0);
2470   }
2471 
2472   // reset handle block
2473   __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2474   __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2475 
2476   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2477   check_forward_pending_exception(masm, G3_scratch);
2478 
2479 
2480   // Return
2481 
2482 #ifndef _LP64
2483   if (ret_type == T_LONG) {
2484 
2485     // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2486     __ sllx(I0, 32, G1);          // Shift bits into high G1
    __ srl (I1, 0, I1);           // Zero extend I1 (harmless?)
2488     __ or3 (I1, G1, G1);          // OR 64 bits into G1
2489   }
2490 #endif
2491 
2492   __ ret();
2493   __ delayed()->restore();
2494 
2495   __ flush();
2496 
2497   nmethod *nm = nmethod::new_native_nmethod(method,
2498                                             masm->code(),
2499                                             vep_offset,
2500                                             frame_complete,
2501                                             stack_slots / VMRegImpl::slots_per_word,
2502                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2503                                             in_ByteSize(lock_offset),
2504                                             oop_maps);
2505   return nm;
2506 
2507 }
2508 
2509 #ifdef HAVE_DTRACE_H
2510 // ---------------------------------------------------------------------------
2511 // Generate a dtrace nmethod for a given signature.  The method takes arguments
2512 // in the Java compiled code convention, marshals them to the native
2513 // abi and then leaves nops at the position you would expect to call a native
2514 // function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trap will cause a notification
2516 // to dtrace.
2517 //
2518 // The probes are only able to take primitive types and java/lang/String as
2519 // arguments.  No other java types are allowed. Strings are converted to utf8
2520 // strings so that from dtrace point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings (256 chars per string in the signature),
// so any java string larger than this is truncated.
2524 
2525 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2526 static bool offsets_initialized = false;
2527 
2528 static VMRegPair reg64_to_VMRegPair(Register r) {
2529   VMRegPair ret;
2530   if (wordSize == 8) {
2531     ret.set2(r->as_VMReg());
2532   } else {
2533     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2534   }
2535   return ret;
2536 }
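
// e.g. (illustrative) reg64_to_VMRegPair(O0) yields first() == O0 on a
// 64-bit VM; on a 32-bit VM it yields the pair (first() == O0,
// second() == its successor O1) to describe the 64-bit value.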
2537 
2538 
2539 nmethod *SharedRuntime::generate_dtrace_nmethod(
2540     MacroAssembler *masm, methodHandle method) {
2541 
2542 
2543   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2544   // be single threaded in this method.
2545   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2546 
2547   // Fill in the signature array, for the calling-convention call.
2548   int total_args_passed = method->size_of_parameters();
2549 
2550   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2551   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2552 
  // The signature we are going to use for the trap that dtrace will see:
  // java/lang/String is converted to a C string, "this" is dropped, and any
  // other object is converted to NULL.  (A one-slot java/lang/Long object
  // reference is converted to a two-slot long, which is why we double the
  // allocation).
2557   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2558   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2559 
2560   int i=0;
2561   int total_strings = 0;
2562   int first_arg_to_pass = 0;
2563   int total_c_args = 0;
2564 
2565   // Skip the receiver as dtrace doesn't want to see it
2566   if( !method->is_static() ) {
2567     in_sig_bt[i++] = T_OBJECT;
2568     first_arg_to_pass = 1;
2569   }
2570 
2571   SignatureStream ss(method->signature());
2572   for ( ; !ss.at_return_type(); ss.next()) {
2573     BasicType bt = ss.type();
2574     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
2575     out_sig_bt[total_c_args++] = bt;
2576     if( bt == T_OBJECT) {
2577       symbolOop s = ss.as_symbol_or_null();
2578       if (s == vmSymbols::java_lang_String()) {
2579         total_strings++;
2580         out_sig_bt[total_c_args-1] = T_ADDRESS;
2581       } else if (s == vmSymbols::java_lang_Boolean() ||
2582                  s == vmSymbols::java_lang_Byte()) {
2583         out_sig_bt[total_c_args-1] = T_BYTE;
2584       } else if (s == vmSymbols::java_lang_Character() ||
2585                  s == vmSymbols::java_lang_Short()) {
2586         out_sig_bt[total_c_args-1] = T_SHORT;
2587       } else if (s == vmSymbols::java_lang_Integer() ||
2588                  s == vmSymbols::java_lang_Float()) {
2589         out_sig_bt[total_c_args-1] = T_INT;
2590       } else if (s == vmSymbols::java_lang_Long() ||
2591                  s == vmSymbols::java_lang_Double()) {
2592         out_sig_bt[total_c_args-1] = T_LONG;
2593         out_sig_bt[total_c_args++] = T_VOID;
2594       }
2595     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2596       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2597       // We convert double to long
2598       out_sig_bt[total_c_args-1] = T_LONG;
2599       out_sig_bt[total_c_args++] = T_VOID;
2600     } else if ( bt == T_FLOAT) {
2601       // We convert float to int
2602       out_sig_bt[total_c_args-1] = T_INT;
2603     }
2604   }
2605 
2606   assert(i==total_args_passed, "validly parsed signature");
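
  // Worked example of the two layouts (hypothetical signature): for an
  // instance method  void m(String s, long x)  we get
  // total_args_passed == 4 with
  //   in_sig_bt  = { T_OBJECT /* this */, T_OBJECT, T_LONG, T_VOID }
  //   out_sig_bt = { T_ADDRESS, T_LONG, T_VOID }   // total_c_args == 3
  // the receiver having been skipped via first_arg_to_pass == 1.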
2607 
2608   // Now get the compiled-Java layout as input arguments
2609   int comp_args_on_stack;
2610   comp_args_on_stack = SharedRuntime::java_calling_convention(
2611       in_sig_bt, in_regs, total_args_passed, false);
2612 
  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // a native (non-jni) function would expect them. To figure out
  // where they go we convert the java signature to a C signature and
  // remove T_VOID for any long/double we might have received.
2618 
2619 
  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but including space
  // for storing the first six register arguments). It's weird; see
  // int_stk_helper.
2623   //
2624   int out_arg_slots;
2625   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2626 
2627   // Calculate the total number of stack slots we will need.
2628 
  // First count the ABI requirement plus all of the outgoing args
2630   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2631 
  // Plus a temp for possible conversion of float/double/long register args
2633 
2634   int conversion_temp = stack_slots;
2635   stack_slots += 2;
2636 
2637 
2638   // Now space for the string(s) we must convert
2639 
2640   int string_locs = stack_slots;
2641   stack_slots += total_strings *
2642                    (max_dtrace_string_size / VMRegImpl::stack_slot_size);
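
  // Sizing sketch (values assumed for illustration): with two string
  // arguments, max_dtrace_string_size == 256 and stack_slot_size == 4,
  // this reserves 2 * (256 / 4) == 128 slots for string conversion.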
2643 
  // OK, the space we have allocated will look like:
2645   //
2646   //
2647   // FP-> |                     |
2648   //      |---------------------|
2649   //      | string[n]           |
2650   //      |---------------------| <- string_locs[n]
2651   //      | string[n-1]         |
2652   //      |---------------------| <- string_locs[n-1]
2653   //      | ...                 |
2654   //      | ...                 |
2655   //      |---------------------| <- string_locs[1]
2656   //      | string[0]           |
2657   //      |---------------------| <- string_locs[0]
2658   //      | temp                |
2659   //      |---------------------| <- conversion_temp
2660   //      | outbound memory     |
2661   //      | based arguments     |
2662   //      |                     |
2663   //      |---------------------|
2664   //      |                     |
2665   // SP-> | out_preserved_slots |
2666   //
2667   //
2668 
  // Now compute the actual number of stack words we need, rounding to
  // keep the stack properly aligned.
2671   stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2672 
2673   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
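
  // Rounding sketch (64-bit build assumed for illustration): with
  // VMRegImpl::slots_per_word == 2 the slot count is rounded up to a
  // multiple of 8 slots, e.g. 75 slots -> 80 slots, giving
  // stack_size == 80 * VMRegImpl::stack_slot_size == 320 bytes.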
2674 
2675   intptr_t start = (intptr_t)__ pc();
2676 
  // First thing, make an ic check to see if we should even be here
2678 
2679   {
2680     Label L;
2681     const Register temp_reg = G3_scratch;
2682     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2683     __ verify_oop(O0);
2684     __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2685     __ cmp(temp_reg, G5_inline_cache_reg);
2686     __ brx(Assembler::equal, true, Assembler::pt, L);
2687     __ delayed()->nop();
2688 
2689     __ jump_to(ic_miss, temp_reg);
2690     __ delayed()->nop();
2691     __ align(CodeEntryAlignment);
2692     __ bind(L);
2693   }
2694 
2695   int vep_offset = ((intptr_t)__ pc()) - start;
2696 
2697 
2698   // The instruction at the verified entry point must be 5 bytes or longer
2699   // because it can be patched on the fly by make_non_entrant. The stack bang
2700   // instruction fits that requirement.
2701 
2702   // Generate stack overflow check before creating frame
2703   __ generate_stack_overflow_check(stack_size);
2704 
2705   assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2706          "valid size for make_non_entrant");
2707 
2708   // Generate a new frame for the wrapper.
2709   __ save(SP, -stack_size, SP);
2710 
  // Frame is now complete as far as size and linkage.
2712 
2713   int frame_complete = ((intptr_t)__ pc()) - start;
2714 
2715 #ifdef ASSERT
2716   bool reg_destroyed[RegisterImpl::number_of_registers];
2717   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2718   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2719     reg_destroyed[r] = false;
2720   }
2721   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2722     freg_destroyed[f] = false;
2723   }
2724 
2725 #endif /* ASSERT */
2726 
2727   VMRegPair zero;
2728   const Register g0 = G0; // without this we get a compiler warning (why??)
2729   zero.set2(g0->as_VMReg());
2730 
2731   int c_arg, j_arg;
2732 
2733   Register conversion_off = noreg;
2734 
2735   for (j_arg = first_arg_to_pass, c_arg = 0 ;
2736        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2737 
2738     VMRegPair src = in_regs[j_arg];
2739     VMRegPair dst = out_regs[c_arg];
2740 
2741 #ifdef ASSERT
2742     if (src.first()->is_Register()) {
2743       assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2744     } else if (src.first()->is_FloatRegister()) {
2745       assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2746                                                FloatRegisterImpl::S)], "ack!");
2747     }
2748     if (dst.first()->is_Register()) {
2749       reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2750     } else if (dst.first()->is_FloatRegister()) {
2751       freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2752                                                  FloatRegisterImpl::S)] = true;
2753     }
2754 #endif /* ASSERT */
2755 
2756     switch (in_sig_bt[j_arg]) {
2757       case T_ARRAY:
2758       case T_OBJECT:
2759         {
2760           if (out_sig_bt[c_arg] == T_BYTE  || out_sig_bt[c_arg] == T_SHORT ||
2761               out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
            // need to unbox a boxed primitive value
2763             Register in_reg = L0;
2764             Register tmp = L2;
2765             if ( src.first()->is_reg() ) {
2766               in_reg = src.first()->as_Register();
2767             } else {
2768               assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2769                      "must be");
2770               __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2771             }
2772             // If the final destination is an acceptable register
2773             if ( dst.first()->is_reg() ) {
2774               if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2775                 tmp = dst.first()->as_Register();
2776               }
2777             }
2778 
2779             Label skipUnbox;
2780             if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2781               __ mov(G0, tmp->successor());
2782             }
2783             __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2784             __ delayed()->mov(G0, tmp);
2785 
2786             BasicType bt = out_sig_bt[c_arg];
2787             int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2788             switch (bt) {
2789                 case T_BYTE:
2790                   __ ldub(in_reg, box_offset, tmp); break;
2791                 case T_SHORT:
2792                   __ lduh(in_reg, box_offset, tmp); break;
2793                 case T_INT:
2794                   __ ld(in_reg, box_offset, tmp); break;
2795                 case T_LONG:
2796                   __ ld_long(in_reg, box_offset, tmp); break;
2797                 default: ShouldNotReachHere();
2798             }
2799 
2800             __ bind(skipUnbox);
2801             // If tmp wasn't final destination copy to final destination
2802             if (tmp == L2) {
2803               VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2804               if (out_sig_bt[c_arg] == T_LONG) {
2805                 long_move(masm, tmp_as_VM, dst);
2806               } else {
2807                 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2808               }
2809             }
2810             if (out_sig_bt[c_arg] == T_LONG) {
2811               assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2812               ++c_arg; // move over the T_VOID to keep the loop indices in sync
2813             }
2814           } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2815             Register s =
2816                 src.first()->is_reg() ? src.first()->as_Register() : L2;
2817             Register d =
2818                 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2819 
            // We store the oop now so that the conversion pass can reach
            // it while in the inner frame. This will be the only store if
            // the oop is NULL.
2823             if (s != L2) {
2824               // src is register
2825               if (d != L2) {
2826                 // dst is register
2827                 __ mov(s, d);
2828               } else {
2829                 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2830                           STACK_BIAS), "must be");
2831                 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2832               }
2833             } else {
2834                 // src not a register
2835                 assert(Assembler::is_simm13(reg2offset(src.first()) +
2836                            STACK_BIAS), "must be");
2837                 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2838                 if (d == L2) {
2839                   assert(Assembler::is_simm13(reg2offset(dst.first()) +
2840                              STACK_BIAS), "must be");
2841                   __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2842                 }
2843             }
2844           } else if (out_sig_bt[c_arg] != T_VOID) {
2845             // Convert the arg to NULL
2846             if (dst.first()->is_reg()) {
2847               __ mov(G0, dst.first()->as_Register());
2848             } else {
2849               assert(Assembler::is_simm13(reg2offset(dst.first()) +
2850                          STACK_BIAS), "must be");
2851               __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2852             }
2853           }
2854         }
2855         break;
2856       case T_VOID:
2857         break;
2858 
2859       case T_FLOAT:
2860         if (src.first()->is_stack()) {
2861           // Stack to stack/reg is simple
2862           move32_64(masm, src, dst);
2863         } else {
2864           if (dst.first()->is_reg()) {
2865             // freg -> reg
2866             int off =
2867               STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2868             Register d = dst.first()->as_Register();
2869             if (Assembler::is_simm13(off)) {
2870               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2871                      SP, off);
2872               __ ld(SP, off, d);
2873             } else {
2874               if (conversion_off == noreg) {
2875                 __ set(off, L6);
2876                 conversion_off = L6;
2877               }
2878               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2879                      SP, conversion_off);
2880               __ ld(SP, conversion_off , d);
2881             }
2882           } else {
2883             // freg -> mem
2884             int off = STACK_BIAS + reg2offset(dst.first());
2885             if (Assembler::is_simm13(off)) {
2886               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2887                      SP, off);
2888             } else {
2889               if (conversion_off == noreg) {
2890                 __ set(off, L6);
2891                 conversion_off = L6;
2892               }
2893               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2894                      SP, conversion_off);
2895             }
2896           }
2897         }
2898         break;
2899 
2900       case T_DOUBLE:
2901         assert( j_arg + 1 < total_args_passed &&
2902                 in_sig_bt[j_arg + 1] == T_VOID &&
2903                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2904         if (src.first()->is_stack()) {
2905           // Stack to stack/reg is simple
2906           long_move(masm, src, dst);
2907         } else {
2908           Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2909 
          // Destination could be an odd reg on 32bit in which case
          // we can't load directly to the destination.
2912 
2913           if (!d->is_even() && wordSize == 4) {
2914             d = L2;
2915           }
2916           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2917           if (Assembler::is_simm13(off)) {
2918             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2919                    SP, off);
2920             __ ld_long(SP, off, d);
2921           } else {
2922             if (conversion_off == noreg) {
2923               __ set(off, L6);
2924               conversion_off = L6;
2925             }
2926             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2927                    SP, conversion_off);
2928             __ ld_long(SP, conversion_off, d);
2929           }
2930           if (d == L2) {
2931             long_move(masm, reg64_to_VMRegPair(L2), dst);
2932           }
2933         }
2934         break;
2935 
2936       case T_LONG :
2937         // 32bit can't do a split move of something like g1 -> O0, O1
2938         // so use a memory temp
2939         if (src.is_single_phys_reg() && wordSize == 4) {
2940           Register tmp = L2;
2941           if (dst.first()->is_reg() &&
2942               (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2943             tmp = dst.first()->as_Register();
2944           }
2945 
2946           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2947           if (Assembler::is_simm13(off)) {
2948             __ stx(src.first()->as_Register(), SP, off);
2949             __ ld_long(SP, off, tmp);
2950           } else {
2951             if (conversion_off == noreg) {
2952               __ set(off, L6);
2953               conversion_off = L6;
2954             }
2955             __ stx(src.first()->as_Register(), SP, conversion_off);
2956             __ ld_long(SP, conversion_off, tmp);
2957           }
2958 
2959           if (tmp == L2) {
2960             long_move(masm, reg64_to_VMRegPair(L2), dst);
2961           }
2962         } else {
2963           long_move(masm, src, dst);
2964         }
2965         break;
2966 
      case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // fall through
2968 
2969       default:
2970         move32_64(masm, src, dst);
2971     }
2972   }
2973 
2974 
  // If we have any strings we must store any register-based arg to the
  // stack.  This includes any still-live float registers too.
2977 
2978   if (total_strings > 0 ) {
2979 
2980     // protect all the arg registers
2981     __ save_frame(0);
2982     __ mov(G2_thread, L7_thread_cache);
2983     const Register L2_string_off = L2;
2984 
2985     // Get first string offset
2986     __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2987 
2988     for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2989       if (out_sig_bt[c_arg] == T_ADDRESS) {
2990 
2991         VMRegPair dst = out_regs[c_arg];
2992         const Register d = dst.first()->is_reg() ?
2993             dst.first()->as_Register()->after_save() : noreg;
2994 
        // It's a string; the oop was already copied to the out arg
        // position
2997         if (d != noreg) {
2998           __ mov(d, O0);
2999         } else {
3000           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3001                  "must be");
3002           __ ld_ptr(FP,  reg2offset(dst.first()) + STACK_BIAS, O0);
3003         }
3004         Label skip;
3005 
3006         __ br_null(O0, false, Assembler::pn, skip);
3007         __ delayed()->add(FP, L2_string_off, O1);
3008 
3009         if (d != noreg) {
3010           __ mov(O1, d);
3011         } else {
3012           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
3013                  "must be");
3014           __ st_ptr(O1, FP,  reg2offset(dst.first()) + STACK_BIAS);
3015         }
3016 
3017         __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3018                 relocInfo::runtime_call_type);
3019         __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3020 
3021         __ bind(skip);
3022 
3023       }
3024 
3025     }
3026     __ mov(L7_thread_cache, G2_thread);
3027     __ restore();
3028 
3029   }
3030 
3031 
  // OK, now we are done. Place the nop that dtrace wants in order to
  // patch in the trap.
3034 
3035   int patch_offset = ((intptr_t)__ pc()) - start;
3036 
3037   __ nop();
3038 
3039 
3040   // Return
3041 
3042   __ ret();
3043   __ delayed()->restore();
3044 
3045   __ flush();
3046 
3047   nmethod *nm = nmethod::new_dtrace_nmethod(
3048       method, masm->code(), vep_offset, patch_offset, frame_complete,
3049       stack_slots / VMRegImpl::slots_per_word);
3050   return nm;
3051 
3052 }
3053 
3054 #endif // HAVE_DTRACE_H
3055 
// This function returns the adjustment (in number of words) to a c2i
// adapter activation for use during deoptimization.
3058 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3059   assert(callee_locals >= callee_parameters,
3060           "test and remove; got more parms than locals");
3061   if (callee_locals < callee_parameters)
3062     return 0;                   // No adjustment for negative locals
3063   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
3064   return round_to(diff, WordsPerLong);
3065 }
3066 
3067 // "Top of Stack" slots that may be unused by the calling convention but must
3068 // otherwise be preserved.
3069 // On Intel these are not necessary and the value can be zero.
3070 // On Sparc this describes the words reserved for storing a register window
3071 // when an interrupt occurs.
3072 uint SharedRuntime::out_preserve_stack_slots() {
3073   return frame::register_save_words * VMRegImpl::slots_per_word;
3074 }
3075 
3076 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3077 //
3078 // Common out the new frame generation for deopt and uncommon trap
3079 //
3080   Register        G3pcs              = G3_scratch; // Array of new pcs (input)
3081   Register        Oreturn0           = O0;
3082   Register        Oreturn1           = O1;
3083   Register        O2UnrollBlock      = O2;
3084   Register        O3array            = O3;         // Array of frame sizes (input)
3085   Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // size of current frame (scratch)
3087 
3088   __ ld_ptr(O3array, 0, O7frame_size);
3089   __ sub(G0, O7frame_size, O7frame_size);
3090   __ save(SP, O7frame_size, SP);
3091   __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
3092 
#ifdef ASSERT
  // make sure that the frames are aligned properly
#ifndef _LP64
  __ btst(wordSize*2-1, SP);
  __ breakpoint_trap(Assembler::notZero);
#endif
#endif
3100 
3101   // Deopt needs to pass some extra live values from frame to frame
3102 
3103   if (deopt) {
3104     __ mov(Oreturn0->after_save(), Oreturn0);
3105     __ mov(Oreturn1->after_save(), Oreturn1);
3106   }
3107 
3108   __ mov(O4array_size->after_save(), O4array_size);
3109   __ sub(O4array_size, 1, O4array_size);
3110   __ mov(O3array->after_save(), O3array);
3111   __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3112   __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
3113 
#ifdef ASSERT
3115   // trash registers to show a clear pattern in backtraces
3116   __ set(0xDEAD0000, I0);
3117   __ add(I0,  2, I1);
3118   __ add(I0,  4, I2);
3119   __ add(I0,  6, I3);
3120   __ add(I0,  8, I4);
  // Don't touch I5; it could hold a valuable savedSP
3122   __ set(0xDEADBEEF, L0);
3123   __ mov(L0, L1);
3124   __ mov(L0, L2);
3125   __ mov(L0, L3);
3126   __ mov(L0, L4);
3127   __ mov(L0, L5);
3128 
3129   // trash the return value as there is nothing to return yet
3130   __ set(0xDEAD0001, O7);
#endif
3132 
3133   __ mov(SP, O5_savedSP);
3134 }
3135 
3136 
3137 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3138   //
3139   // loop through the UnrollBlock info and create new frames
3140   //
3141   Register        G3pcs              = G3_scratch;
3142   Register        Oreturn0           = O0;
3143   Register        Oreturn1           = O1;
3144   Register        O2UnrollBlock      = O2;
3145   Register        O3array            = O3;
3146   Register        O4array_size       = O4;
3147   Label           loop;
3148 
3149   // Before we make new frames, check to see if stack is available.
3150   // Do this after the caller's return address is on top of stack
3151   if (UseStackBanging) {
3152     // Get total frame size for interpreted frames
3153     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3154     __ bang_stack_size(O4, O3, G3_scratch);
3155   }
3156 
3157   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3158   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3159   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3160 
  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP, it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere (a sketch follows).
3170   //
3171 
3172   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3173   __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
3174   __ sub(SP, O7, SP);
3175 
3176 #ifdef ASSERT
3177   // make sure that there is at least one entry in the array
3178   __ tst(O4array_size);
3179   __ breakpoint_trap(Assembler::zero);
3180 #endif
3181 
3182   // Now push the new interpreter frames
3183   __ bind(loop);
3184 
3185   // allocate a new frame, filling the registers
3186 
3187   gen_new_frame(masm, deopt);        // allocate an interpreter frame
3188 
3189   __ tst(O4array_size);
3190   __ br(Assembler::notZero, false, Assembler::pn, loop);
3191   __ delayed()->add(O3array, wordSize, O3array);
3192   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
3193 
3194 }
3195 
3196 //------------------------------generate_deopt_blob----------------------------
3197 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3198 // instead.
3199 void SharedRuntime::generate_deopt_blob() {
3200   // allocate space for the code
3201   ResourceMark rm;
3202   // setup code generation tools
3203   int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
3204 #ifdef _LP64
3205   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3206 #else
3207   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3208   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3209   CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3210 #endif /* _LP64 */
3211   MacroAssembler* masm               = new MacroAssembler(&buffer);
3212   FloatRegister   Freturn0           = F0;
3213   Register        Greturn1           = G1;
3214   Register        Oreturn0           = O0;
3215   Register        Oreturn1           = O1;
3216   Register        O2UnrollBlock      = O2;
3217   Register        O3tmp              = O3;
3218   Register        I5exception_tmp    = I5;
3219   Register        G4exception_tmp    = G4_scratch;
3220   int             frame_size_words;
3221   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3222 #if !defined(_LP64) && defined(COMPILER2)
3223   Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3224 #endif
3225   Label           cont;
3226 
3227   OopMapSet *oop_maps = new OopMapSet();
3228 
3229   //
3230   // This is the entry point for code which is returning to a de-optimized
3231   // frame.
3232   // The steps taken by this frame are as follows:
3233   //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3234   //     and all potentially live registers (at a pollpoint many registers can be live).
3235   //
3236   //   - call the C routine: Deoptimization::fetch_unroll_info (this function
3237   //     returns information about the number and size of interpreter frames
3238   //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
3240   //     volatile registers will now be captured in the vframeArray as needed.
3241   //   - deallocate the deoptimization frame
3242   //   - in a loop using the information returned in the previous step
3243   //     push new interpreter frames (take care to propagate the return
3244   //     values through each new frame pushed)
3245   //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3246   //   - call the C routine: Deoptimization::unpack_frames (this function
3247   //     lays out values on the interpreter frame which was just created)
3248   //   - deallocate the dummy unpack_frame
3249   //   - ensure that all the return values are correctly set and then do
3250   //     a return to the interpreter entry point
3251   //
3252   // Refer to the following methods for more information:
3253   //   - Deoptimization::fetch_unroll_info
3254   //   - Deoptimization::unpack_frames
3255 
3256   OopMap* map = NULL;
3257 
3258   int start = __ offset();
3259 
3260   // restore G2, the trampoline destroyed it
3261   __ get_thread();
3262 
3263   // On entry we have been called by the deoptimized nmethod with a call that
3264   // replaced the original call (or safepoint polling location) so the deoptimizing
3265   // pc is now in O7. Return values are still in the expected places
3266 
3267   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3268   __ ba(false, cont);
3269   __ delayed()->mov(Deoptimization::Unpack_deopt, I5exception_tmp);
3270 
3271   int exception_offset = __ offset() - start;
3272 
3273   // restore G2, the trampoline destroyed it
3274   __ get_thread();
3275 
3276   // On entry we have been jumped to by the exception handler (or exception_blob
3277   // for server).  O0 contains the exception oop and O7 contains the original
3278   // exception pc.  So if we push a frame here it will look to the
3279   // stack walking code (fetch_unroll_info) just like a normal call so
3280   // state will be extracted normally.
3281 
3282   // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled the same way except
3284   // for where the pending exception is kept.
3285   __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3286 
3287   //
3288   // Vanilla deoptimization with an exception pending in exception_oop
3289   //
3290   int exception_in_tls_offset = __ offset() - start;
3291 
3292   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3293   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3294 
3295   // Restore G2_thread
3296   __ get_thread();
3297 
3298 #ifdef ASSERT
3299   {
3300     // verify that there is really an exception oop in exception_oop
3301     Label has_exception;
3302     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3303     __ br_notnull(Oexception, false, Assembler::pt, has_exception);
3304     __ delayed()-> nop();
3305     __ stop("no exception in thread");
3306     __ bind(has_exception);
3307 
3308     // verify that there is no pending exception
3309     Label no_pending_exception;
3310     Address exception_addr(G2_thread, Thread::pending_exception_offset());
3311     __ ld_ptr(exception_addr, Oexception);
3312     __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3313     __ delayed()->nop();
3314     __ stop("must not have pending exception here");
3315     __ bind(no_pending_exception);
3316   }
3317 #endif
3318 
3319   __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, I5exception_tmp);
3321 
3322   //
3323   // Reexecute entry, similar to c2 uncommon trap
3324   //
3325   int reexecute_offset = __ offset() - start;
3326 
3327   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3328   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3329 
3330   __ mov(Deoptimization::Unpack_reexecute, I5exception_tmp);
3331 
3332   __ bind(cont);
3333 
3334   __ set_last_Java_frame(SP, noreg);
3335 
3336   // do the call by hand so we can get the oopmap
3337 
3338   __ mov(G2_thread, L7_thread_cache);
3339   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3340   __ delayed()->mov(G2_thread, O0);
3341 
  // Set an oopmap for the call site: this describes all our saved volatile registers
3343 
3344   oop_maps->add_gc_map( __ offset()-start, map);
3345 
3346   __ mov(L7_thread_cache, G2_thread);
3347 
3348   __ reset_last_Java_frame();
3349 
3350   // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3351   // so this move will survive
3352 
3353   __ mov(I5exception_tmp, G4exception_tmp);
3354 
3355   __ mov(O0, O2UnrollBlock->after_save());
3356 
3357   RegisterSaver::restore_result_registers(masm);
3358 
3359   Label noException;
3360   __ cmp(G4exception_tmp, Deoptimization::Unpack_exception);   // Was exception pending?
3361   __ br(Assembler::notEqual, false, Assembler::pt, noException);
3362   __ delayed()->nop();
3363 
  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
3366   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3367   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3368   __ bind(noException);
3369 
3370   // deallocate the deoptimization frame taking care to preserve the return values
3371   __ mov(Oreturn0,     Oreturn0->after_save());
3372   __ mov(Oreturn1,     Oreturn1->after_save());
3373   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3374   __ restore();
3375 
3376   // Allocate new interpreter frame(s) and possible c2i adapter frame
3377 
3378   make_new_frames(masm, true);
3379 
3380   // push a dummy "unpack_frame" taking care of float return values and
3381   // call Deoptimization::unpack_frames to have the unpacker layout
3382   // information in the interpreter frames just created and then return
3383   // to the interpreter entry point
3384   __ save(SP, -frame_size_words*wordSize, SP);
3385   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3386 #if !defined(_LP64)
3387 #if defined(COMPILER2)
3388   if (!TieredCompilation) {
3389     // 32-bit 1-register longs return longs in G1
3390     __ stx(Greturn1, saved_Greturn1_addr);
3391   }
3392 #endif
3393   __ set_last_Java_frame(SP, noreg);
3394   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4exception_tmp);
3395 #else
3396   // LP64 uses g4 in set_last_Java_frame
3397   __ mov(G4exception_tmp, O1);
3398   __ set_last_Java_frame(SP, G0);
3399   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3400 #endif
3401   __ reset_last_Java_frame();
3402   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3403 
3404   // In tiered we never use C2 to compile methods returning longs so
3405   // the result is where we expect it already.
3406 
3407 #if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.  In the tiered world there is
  // a mismatch between how C1- and C2-compiled code return longs, so
  // currently compilation of methods which return longs is disabled
  // for C2 and so is this code.  Eventually C1 and C2 will do the
  // same thing for longs in the tiered world.
3414   if (!TieredCompilation) {
3415     Label not_long;
3416     __ cmp(O0,T_LONG);
3417     __ br(Assembler::notEqual, false, Assembler::pt, not_long);
3418     __ delayed()->nop();
3419     __ ldd(saved_Greturn1_addr,I0);
3420     __ bind(not_long);
3421   }
3422 #endif
3423   __ ret();
3424   __ delayed()->restore();
3425 
3426   masm->flush();
3427   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3428   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3429 }
3430 
3431 #ifdef COMPILER2
3432 
3433 //------------------------------generate_uncommon_trap_blob--------------------
3434 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3435 // instead.
3436 void SharedRuntime::generate_uncommon_trap_blob() {
3437   // allocate space for the code
3438   ResourceMark rm;
3439   // setup code generation tools
3440   int pad = VerifyThread ? 512 : 0;
3441 #ifdef _LP64
3442   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3443 #else
3444   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3445   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3446   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3447 #endif
3448   MacroAssembler* masm               = new MacroAssembler(&buffer);
3449   Register        O2UnrollBlock      = O2;
3450   Register        O3tmp              = O3;
3451   Register        O2klass_index      = O2;
3452 
3453   //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
3458   // The steps taken by this frame are as follows:
3459   //   - push a fake "unpack_frame"
3460   //   - call the C routine Deoptimization::uncommon_trap (this function
3461   //     packs the current compiled frame into vframe arrays and returns
3462   //     information about the number and size of interpreter frames which
3463   //     are equivalent to the frame which is being deoptimized)
3464   //   - deallocate the "unpack_frame"
3465   //   - deallocate the deoptimization frame
3466   //   - in a loop using the information returned in the previous step
3467   //     push interpreter frames;
3468   //   - create a dummy "unpack_frame"
3469   //   - call the C routine: Deoptimization::unpack_frames (this function
3470   //     lays out values on the interpreter frame which was just created)
3471   //   - deallocate the dummy unpack_frame
3472   //   - return to the interpreter entry point
3473   //
3474   //  Refer to the following methods for more information:
3475   //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames
3477 
3478   // the unloaded class index is in O0 (first parameter to this blob)
3479 
3480   // push a dummy "unpack_frame"
3481   // and call Deoptimization::uncommon_trap to pack the compiled frame into
3482   // vframe array and return the UnrollBlock information
3483   __ save_frame(0);
3484   __ set_last_Java_frame(SP, noreg);
3485   __ mov(I0, O2klass_index);
3486   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3487   __ reset_last_Java_frame();
3488   __ mov(O0, O2UnrollBlock->after_save());
3489   __ restore();
3490 
3491   // deallocate the deoptimized frame taking care to preserve the return values
3492   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3493   __ restore();
3494 
3495   // Allocate new interpreter frame(s) and possible c2i adapter frame
3496 
3497   make_new_frames(masm, false);
3498 
3499   // push a dummy "unpack_frame" taking care of float return values and
3500   // call Deoptimization::unpack_frames to have the unpacker layout
3501   // information in the interpreter frames just created and then return
3502   // to the interpreter entry point
3503   __ save_frame(0);
3504   __ set_last_Java_frame(SP, noreg);
3505   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3506   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3507   __ reset_last_Java_frame();
3508   __ ret();
3509   __ delayed()->restore();
3510 
3511   masm->flush();
3512   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3513 }
3514 
3515 #endif // COMPILER2
3516 
3517 //------------------------------generate_handler_blob-------------------
3518 //
3519 // Generate a special Compile2Runtime blob that saves all registers, and sets
3520 // up an OopMap.
3521 //
3522 // This blob is jumped to (via a breakpoint and the signal handler) from a
3523 // safepoint in compiled code.  On entry to this blob, O7 contains the
3524 // address in the original nmethod at which we should resume normal execution.
3525 // Thus, this blob looks like a subroutine which must preserve lots of
3526 // registers and return normally.  Note that O7 is never register-allocated,
3527 // so it is guaranteed to be free here.
3528 //
3529 
3530 // The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.  A simple 'save' turns the %o's into %i's and
3532 // an interrupt will chop off their heads.  Making space in the caller's frame
3533 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3534 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3535 // SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
3536 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3537 // Tricky, tricky, tricky...
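
// A rough sketch of that sequence (illustrative pseudocode, not the exact
// code emitted by RegisterSaver):
//   sub   SP, extra_space, SP       // grow the caller's frame
//   stx   %o0..%o5, [SP + ...]      // save 64-bit %o's while still %o's
//   save  SP, -frame_size, SP       // window save; %o's become %i's
//   add   FP, extra_space, FP       // hand the walker the caller's old SP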
3538 
3539 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
3540   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3541 
3542   // allocate space for the code
3543   ResourceMark rm;
3544   // setup code generation tools
3545   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3546   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3547   // even larger with TraceJumps
3548   int pad = TraceJumps ? 512 : 0;
3549   CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3550   MacroAssembler* masm                = new MacroAssembler(&buffer);
3551   int             frame_size_words;
3552   OopMapSet *oop_maps = new OopMapSet();
3553   OopMap* map = NULL;
3554 
3555   int start = __ offset();
3556 
  // If this safepoint causes a return (cause_return), do a "restore" first
3558   if (cause_return) {
3559     __ restore();
3560   } else {
3561     // Make it look like we were called via the poll
3562     // so that frame constructor always sees a valid return address
3563     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3564     __ sub(O7, frame::pc_return_offset, O7);
3565   }
3566 
3567   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3568 
3569   // setup last_Java_sp (blows G4)
3570   __ set_last_Java_frame(SP, noreg);
3571 
  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3574   __ mov(G2_thread, O0);
3575   __ save_thread(L7_thread_cache);
3576   __ call(call_ptr);
3577   __ delayed()->nop();
3578 
3579   // Set an oopmap for the call site.
3580   // We need this not only for callee-saved registers, but also for volatile
3581   // registers that the compiler might be keeping live across a safepoint.
3582 
3583   oop_maps->add_gc_map( __ offset() - start, map);
3584 
3585   __ restore_thread(L7_thread_cache);
3586   // clear last_Java_sp
3587   __ reset_last_Java_frame();
3588 
3589   // Check for exceptions
3590   Label pending;
3591 
3592   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3593   __ tst(O1);
3594   __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3595   __ delayed()->nop();
3596 
3597   RegisterSaver::restore_live_registers(masm);
3598 
  // We are back to the original state on entry and ready to go.
3600 
3601   __ retl();
3602   __ delayed()->nop();
3603 
3604   // Pending exception after the safepoint
3605 
3606   __ bind(pending);
3607 
3608   RegisterSaver::restore_live_registers(masm);
3609 
  // We are back to the original state on entry.
3611 
3612   // Tail-call forward_exception_entry, with the issuing PC in O7,
3613   // so it looks like the original nmethod called forward_exception_entry.
3614   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3615   __ JMP(O0, 0);
3616   __ delayed()->nop();
3617 
3618   // -------------
3619   // make sure all code is generated
3620   masm->flush();
3621 
  // return the safepoint blob
3623   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3624 }
3625 
3626 //
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
3633 //
3634 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
3635   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3636 
3637   // allocate space for the code
3638   ResourceMark rm;
3639   // setup code generation tools
3640   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3641   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3642   // even larger with TraceJumps
3643   int pad = TraceJumps ? 512 : 0;
3644   CodeBuffer buffer(name, 1600 + pad, 512);
3645   MacroAssembler* masm                = new MacroAssembler(&buffer);
3646   int             frame_size_words;
3647   OopMapSet *oop_maps = new OopMapSet();
3648   OopMap* map = NULL;
3649 
3650   int start = __ offset();
3651 
3652   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3653 
3654   int frame_complete = __ offset();
3655 
3656   // setup last_Java_sp (blows G4)
3657   __ set_last_Java_frame(SP, noreg);
3658 
  // call into the runtime to resolve the call site.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3661   __ mov(G2_thread, O0);
3662   __ save_thread(L7_thread_cache);
3663   __ call(destination, relocInfo::runtime_call_type);
3664   __ delayed()->nop();
3665 
3666   // O0 contains the address we are going to jump to assuming no exception got installed
3667 
3668   // Set an oopmap for the call site.
3669   // We need this not only for callee-saved registers, but also for volatile
3670   // registers that the compiler might be keeping live across a safepoint.
3671 
3672   oop_maps->add_gc_map( __ offset() - start, map);
3673 
3674   __ restore_thread(L7_thread_cache);
3675   // clear last_Java_sp
3676   __ reset_last_Java_frame();
3677 
3678   // Check for exceptions
3679   Label pending;
3680 
3681   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3682   __ tst(O1);
3683   __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3684   __ delayed()->nop();
3685 
3686   // get the returned methodOop
3687 
3688   __ get_vm_result(G5_method);
3689   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3690 
3691   // O0 is where we want to jump, overwrite G3 which is saved and scratch
3692 
3693   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3694 
3695   RegisterSaver::restore_live_registers(masm);
3696 
  // We are back to the original state on entry and ready to go.
3698 
3699   __ JMP(G3, 0);
3700   __ delayed()->nop();
3701 
3702   // Pending exception after the safepoint
3703 
3704   __ bind(pending);
3705 
3706   RegisterSaver::restore_live_registers(masm);
3707 
  // We are back to the original state on entry.
3709 
3710   // Tail-call forward_exception_entry, with the issuing PC in O7,
3711   // so it looks like the original nmethod called forward_exception_entry.
3712   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3713   __ JMP(O0, 0);
3714   __ delayed()->nop();
3715 
3716   // -------------
3717   // make sure all code is generated
3718   masm->flush();
3719 
  // return the blob
3721   // frame_size_words or bytes??
3722   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3723 }
3724 
3725 void SharedRuntime::generate_stubs() {
3726 
3727   _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
3728                                              "wrong_method_stub");
3729 
3730   _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
3731                                         "ic_miss_stub");
3732 
3733   _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
3734                                         "resolve_opt_virtual_call");
3735 
3736   _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
3737                                         "resolve_virtual_call");
3738 
3739   _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
3740                                         "resolve_static_call");
3741 
3742   _polling_page_safepoint_handler_blob =
3743     generate_handler_blob(CAST_FROM_FN_PTR(address,
3744                    SafepointSynchronize::handle_polling_page_exception), false);
3745 
3746   _polling_page_return_handler_blob =
3747     generate_handler_blob(CAST_FROM_FN_PTR(address,
3748                    SafepointSynchronize::handle_polling_page_exception), true);
3749 
3750   generate_deopt_blob();
3751 
3752 #ifdef COMPILER2
3753   generate_uncommon_trap_blob();
3754 #endif // COMPILER2
3755 }