/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a
  // call, or deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // We can't use round_to here because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };
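
  // A worked example of the layout arithmetic above, assuming a hypothetical
  // call_args_area of 100 bytes (the real value depends on the frame constants):
  //   start_of_extra_save_area = (100 + 7) & ~7 = 104
  //   g1_offset = 104, g3_offset = 112, g4_offset = 120, g5_offset = 128,
  //   o0_offset = 136 ... o5_offset = 176,
  //   ccr_offset = 184, fsr_offset = 192, d00_offset = 200,
  //   register_save_size = 200 + 8*32 = 456 bytes.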


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);
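
  // Sketch of the unit conversions above, with hypothetical numbers: if
  // additional_frame_words is 0 and register_save_size were 456 bytes, then
  // frame_size = round_to(456, 16) = 464 bytes, which is
  // 464 / sizeof(jint) = 116 OopMap slots and (on a 64-bit build, where
  // wordSize == 8) 464 / 8 = 58 CodeBlob words.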

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr(G5);
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
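
// For example (a sketch, with a hypothetical out_preserve_stack_slots() of 16
// and the usual 4-byte stack_slot_size): a VMReg with reg2stack() == 3 maps
// to (3 + 16) * 4 = 76 bytes above the window area.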

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (i.e.
// regs[].first() == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()),
// nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.
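
// A small sketch of the encoding described above (values are illustrative):
//   32-bit int passed in O0:      regs[i].first() == O0's VMReg,
//                                 regs[i].second() == VMRegImpl::Bad()
//   64-bit long in one register:  regs[i].second() == regs[i].first()->next()
//   T_VOID half of a long/double: both first() and second() == VMRegImpl::Bad()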


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed.
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt   = 0;
  int flt_reg_cnt   = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                            stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
        if (int_reg < int_reg_max) {
          Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
          regs[i].set2(r->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else
#ifdef COMPILER2
        // For 32-bit build, can't pass longs in O-regs because they become
        // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
        // spare and available.  This convention isn't used by the Sparc ABI or
        // anywhere else. If we're tiered then we don't use G-regs because c1
        // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
        // G0: zero
        // G1: 1st Long arg
        // G2: global allocated to TLS
        // G3: used in inline cache check
        // G4: 2nd Long arg
        // G5: used in inline cache check
        // G6: used by OS
        // G7: used by OS

        if (g_reg == G1) {
          regs[i].set2(G1->as_VMReg()); // This long arg in G1
          g_reg = G4;                   // Where the next arg goes
        } else if (g_reg == G4) {
          regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
          g_reg = noreg;                // No more longs in registers
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else // COMPILER2
        if (int_reg_pairs + 1 < int_reg_max) {
          if (is_outgoing) {
            regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
          } else {
            regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
          }
          int_reg_pairs += 2;
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}
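
// A sketch of the layout above for a hypothetical 64-bit signature
// (int, long, double):
//   int    -> first int reg (I0/O0), set1
//   long   -> second int reg (I1/O1), set2 (one 64-bit register)
//   double -> D0 (the float pair starting at F0), set2
// stk_reg_pairs stays 0, so no outgoing stack space is needed.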

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
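
// Note (sketch): a SPARC load/store displacement is a signed 13-bit
// immediate, so only offsets in [-4096, 4095] fit directly.  For example,
// an offset of 5000 does not fit, and ensure_simm13_or_reg materializes it
// into Rdisp first, returning the register instead of the constant.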

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}
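
// A worked example of the 32-bit split store above: for a long value
// 0x1122334455667788 held in r, stw stores the low word 0x55667788 at
// arg_slot(st_off); after srlx(r, 32, r) the register holds
// 0x0000000011223344, and the second stw stores the high word at
// next_arg_slot(st_off).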

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
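
  // Worked example (hypothetical numbers): with 10 args and a 64-bit
  // stackElementSize of 8, arg_size = 80 bytes; if the varargs area
  // were 48 bytes, extraspace = round_to(80 + 48, 16) = 128 bytes.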

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACK_BIAS, we can run
  // out of bits in the displacement to do loads and stores.  Use G3 as a
  // temporary displacement.
  if (! __ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }
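
  // Worked example of the conversion above (64-bit, wordSize == 8): with
  // comp_args_on_stack == 5 slots, 5*4 = 20 bytes rounds to 24 bytes = 3
  // words, which rounds up to 4 words for alignment, so SP drops by 32 bytes.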

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);

  if (StressNonEntrant) {
    // Open a big window for deopt failure
    __ save_frame(0);
    __ mov(G0, L0);
    Label loop;
    __ bind(loop);
    __ sub(L0, 1, L0);
    __ br_null(L0, false, Assembler::pt, loop);
    __ delayed()->nop();

    __ restore();
  }


  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp   = L0;   // another scratch register
#else
    Register R_temp   = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any stack location the c calling convention must add in this bias amount
  // to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).
1243   int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1244   if( mem_parm_offset < 0 ) {
1245     return as_oRegister(i)->as_VMReg();
1246   } else {
1247     int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1248     // Now return a biased offset that will be correct when out_preserve_slots is added back in
1249     return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1250   }
1251 }
1252 
1253 
1254 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1255                                          VMRegPair *regs,
1256                                          int total_args_passed) {
1257 
1258     // Return the number of VMReg stack_slots needed for the args.
1259     // This value does not include an abi space (like register window
1260     // save area).
1261 
1262     // The native convention is V8 if !LP64
1263     // The LP64 convention is the V9 convention which is slightly more sane.
1264 
1265     // We return the amount of VMReg stack slots we need to reserve for all
1266     // the arguments NOT counting out_preserve_stack_slots. Since we always
1267     // have space for storing at least 6 registers to memory we start with that.
1268     // See int_stk_helper for a further discussion.
1269     int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
1270 
1271 #ifdef _LP64
1272     // V9 convention: All things "as-if" on double-wide stack slots.
1273     // Hoist any int/ptr/long's in the first 6 to int regs.
1274     // Hoist any flt/dbl's in the first 16 dbl regs.
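    // Illustrative example: for the C signature (int, double, float) the
    // loop below assigns j=0 -> %o0, j=1 -> the %f2/%f3 even/odd pair and,
    // because the double's T_VOID half decrements j, the float gets
    // j=2 -> %f5.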
1275     int j = 0;                  // Count of actual args, not HALVES
1276     for( int i=0; i<total_args_passed; i++, j++ ) {
1277       switch( sig_bt[i] ) {
1278       case T_BOOLEAN:
1279       case T_BYTE:
1280       case T_CHAR:
1281       case T_INT:
1282       case T_SHORT:
1283         regs[i].set1( int_stk_helper( j ) ); break;
1284       case T_LONG:
1285         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1286       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1287       case T_ARRAY:
1288       case T_OBJECT:
1289         regs[i].set2( int_stk_helper( j ) );
1290         break;
1291       case T_FLOAT:
1292         if ( j < 16 ) {
1293           // V9ism: floats go in ODD registers
1294           regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
1295         } else {
1296           // V9ism: floats go in ODD stack slot
1297           regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
1298         }
1299         break;
1300       case T_DOUBLE:
1301         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1302         if ( j < 16 ) {
1303           // V9ism: doubles go in EVEN/ODD regs
1304           regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
1305         } else {
1306           // V9ism: doubles go in EVEN/ODD stack slots
1307           regs[i].set2(VMRegImpl::stack2reg(j<<1));
1308         }
1309         break;
1310       case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
1311       default:
1312         ShouldNotReachHere();
1313       }
1314       if (regs[i].first()->is_stack()) {
1315         int off =  regs[i].first()->reg2stack();
1316         if (off > max_stack_slots) max_stack_slots = off;
1317       }
1318       if (regs[i].second()->is_stack()) {
1319         int off =  regs[i].second()->reg2stack();
1320         if (off > max_stack_slots) max_stack_slots = off;
1321       }
1322     }
1323 
1324 #else // _LP64
1325     // V8 convention: first 6 things in O-regs, rest on stack.
1326     // Alignment is willy-nilly.
1327     for( int i=0; i<total_args_passed; i++ ) {
1328       switch( sig_bt[i] ) {
1329       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1330       case T_ARRAY:
1331       case T_BOOLEAN:
1332       case T_BYTE:
1333       case T_CHAR:
1334       case T_FLOAT:
1335       case T_INT:
1336       case T_OBJECT:
1337       case T_SHORT:
1338         regs[i].set1( int_stk_helper( i ) );
1339         break;
1340       case T_DOUBLE:
1341       case T_LONG:
1342         assert( sig_bt[i+1] == T_VOID, "expecting half" );
1343         regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
1344         break;
1345       case T_VOID: regs[i].set_bad(); break;
1346       default:
1347         ShouldNotReachHere();
1348       }
1349       if (regs[i].first()->is_stack()) {
1350         int off =  regs[i].first()->reg2stack();
1351         if (off > max_stack_slots) max_stack_slots = off;
1352       }
1353       if (regs[i].second()->is_stack()) {
1354         int off =  regs[i].second()->reg2stack();
1355         if (off > max_stack_slots) max_stack_slots = off;
1356       }
1357     }
1358 #endif // _LP64
1359 
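  // max_stack_slots is the highest slot index seen; +1 turns it into a
  // count, and rounding to an even slot count keeps the area 8-byte aligned.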
1360   return round_to(max_stack_slots + 1, 2);
1361 
1362 }
1363 
1364 
1365 // ---------------------------------------------------------------------------
1366 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1367   switch (ret_type) {
1368   case T_FLOAT:
1369     __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1370     break;
1371   case T_DOUBLE:
1372     __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1373     break;
1374   }
1375 }
1376 
1377 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1378   switch (ret_type) {
1379   case T_FLOAT:
1380     __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1381     break;
1382   case T_DOUBLE:
1383     __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1384     break;
1385   }
1386 }
1387 
// Check and forward any pending exception.  Thread is stored in
1389 // L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
1390 // is no exception handler.  We merely pop this frame off and throw the
1391 // exception in the caller's frame.
1392 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1393   Label L;
1394   __ br_null(Rex_oop, false, Assembler::pt, L);
1395   __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1396   // Since this is a native call, we *know* the proper exception handler
1397   // without calling into the VM: it's the empty function.  Just pop this
1398   // frame and then jump to forward_exception_entry; O7 will contain the
1399   // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1401   __ jump_to(exception_entry, G3_scratch);
1402   __ delayed()->restore();      // Pop this frame off.
1403   __ bind(L);
1404 }
1405 
// A simple move of an integer-like type
1407 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1408   if (src.first()->is_stack()) {
1409     if (dst.first()->is_stack()) {
1410       // stack to stack
1411       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1412       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1413     } else {
1414       // stack to reg
1415       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1416     }
1417   } else if (dst.first()->is_stack()) {
1418     // reg to stack
1419     __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1420   } else {
1421     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1422   }
1423 }
1424 
// On 64 bit we will store integer-like items to the stack as 64-bit
// items (sparc abi) even though java would only store 32 bits for a
// parameter. On 32 bit it will simply be 32 bits. So this routine will
// do 32->32 on 32 bit and 32->64 on 64 bit.
1429 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1430   if (src.first()->is_stack()) {
1431     if (dst.first()->is_stack()) {
1432       // stack to stack
1433       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1434       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1435     } else {
1436       // stack to reg
1437       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1438     }
1439   } else if (dst.first()->is_stack()) {
1440     // reg to stack
1441     __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1442   } else {
1443     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1444   }
1445 }
1446 
1447 
// An oop arg. Must pass a handle, not the oop itself
1449 static void object_move(MacroAssembler* masm,
1450                         OopMap* map,
1451                         int oop_handle_offset,
1452                         int framesize_in_slots,
1453                         VMRegPair src,
1454                         VMRegPair dst,
1455                         bool is_receiver,
1456                         int* receiver_offset) {
1457 
1458   // must pass a handle. First figure out the location we use as a handle
1459 
1460   if (src.first()->is_stack()) {
1461     // Oop is already on the stack
1462     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1463     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1464     __ ld_ptr(rHandle, 0, L4);
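    // A null oop must be passed as a NULL handle, not as a pointer to a
    // stack slot holding NULL; the conditional move below clears rHandle
    // when the loaded oop (L4) is zero.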
1465 #ifdef _LP64
1466     __ movr( Assembler::rc_z, L4, G0, rHandle );
1467 #else
1468     __ tst( L4 );
1469     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1470 #endif
1471     if (dst.first()->is_stack()) {
1472       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1473     }
1474     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1475     if (is_receiver) {
1476       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1477     }
1478     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1479   } else {
    // Oop is in an input register; we must flush it to the stack
1481     const Register rOop = src.first()->as_Register();
1482     const Register rHandle = L5;
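    // The handle-area slot is chosen by the inbound register number, e.g.
    // an oop arriving in I1 (input_number 1) uses the second word of the
    // area sized earlier for all six register arguments.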
1483     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1484     int offset = oop_slot*VMRegImpl::stack_slot_size;
1485     Label skip;
1486     __ st_ptr(rOop, SP, offset + STACK_BIAS);
1487     if (is_receiver) {
1488       *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
1489     }
1490     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1491     __ add(SP, offset + STACK_BIAS, rHandle);
1492 #ifdef _LP64
1493     __ movr( Assembler::rc_z, rOop, G0, rHandle );
1494 #else
1495     __ tst( rOop );
1496     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1497 #endif
1498 
1499     if (dst.first()->is_stack()) {
1500       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1501     } else {
1502       __ mov(rHandle, dst.first()->as_Register());
1503     }
1504   }
1505 }
1506 
// A float arg may have to do float reg to int reg conversion
1508 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1509   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1510 
1511   if (src.first()->is_stack()) {
1512     if (dst.first()->is_stack()) {
1513       // stack to stack the easiest of the bunch
1514       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1515       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1516     } else {
1517       // stack to reg
1518       if (dst.first()->is_Register()) {
1519         __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1520       } else {
1521         __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1522       }
1523     }
1524   } else if (dst.first()->is_stack()) {
1525     // reg to stack
1526     if (src.first()->is_Register()) {
1527       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1528     } else {
1529       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1530     }
1531   } else {
1532     // reg to reg
1533     if (src.first()->is_Register()) {
1534       if (dst.first()->is_Register()) {
1535         // gpr -> gpr
1536         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1537       } else {
1538         // gpr -> fpr
1539         __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1540         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1541       }
1542     } else if (dst.first()->is_Register()) {
1543       // fpr -> gpr
1544       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1545       __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1546     } else {
1547       // fpr -> fpr
1548       // In theory these overlap but the ordering is such that this is likely a nop
1549       if ( src.first() != dst.first()) {
1550         __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1551       }
1552     }
1553   }
1554 }
1555 
1556 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1557   VMRegPair src_lo(src.first());
1558   VMRegPair src_hi(src.second());
1559   VMRegPair dst_lo(dst.first());
1560   VMRegPair dst_hi(dst.second());
1561   simple_move32(masm, src_lo, dst_lo);
1562   simple_move32(masm, src_hi, dst_hi);
1563 }
1564 
1565 // A long move
1566 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1567 
1568   // Do the simple ones here else do two int moves
1569   if (src.is_single_phys_reg() ) {
1570     if (dst.is_single_phys_reg()) {
1571       __ mov(src.first()->as_Register(), dst.first()->as_Register());
1572     } else {
1573       // split src into two separate registers
1574       // Remember hi means hi address or lsw on sparc
1575       // Move msw to lsw
1576       if (dst.second()->is_reg()) {
1577         // MSW -> MSW
1578         __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1579         // Now LSW -> LSW
1580         // this will only move lo -> lo and ignore hi
1581         VMRegPair split(dst.second());
1582         simple_move32(masm, src, split);
1583       } else {
1584         VMRegPair split(src.first(), L4->as_VMReg());
1585         // MSW -> MSW (lo ie. first word)
1586         __ srax(src.first()->as_Register(), 32, L4);
1587         split_long_move(masm, split, dst);
1588       }
1589     }
1590   } else if (dst.is_single_phys_reg()) {
1591     if (src.is_adjacent_aligned_on_stack(2)) {
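      // Both halves are adjacent and 8-byte aligned: one 64-bit load does it.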
1592       __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1593     } else {
1594       // dst is a single reg.
1595       // Remember lo is low address not msb for stack slots
1596       // and lo is the "real" register for registers
      // src is either a split pair (reg + stack) or two stack slots
1598 
1599       VMRegPair split;
1600 
1601       if (src.first()->is_reg()) {
1602         // src.lo (msw) is a reg, src.hi is stk/reg
1603         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1604         split.set_pair(dst.first(), src.first());
1605       } else {
1606         // msw is stack move to L5
1607         // lsw is stack move to dst.lo (real reg)
1608         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1609         split.set_pair(dst.first(), L5->as_VMReg());
1610       }
1611 
1612       // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1613       // msw   -> src.lo/L5,  lsw -> dst.lo
1614       split_long_move(masm, src, split);
1615 
      // dst.lo now holds the LSW in its low 32 bits; shift the MSW half
      // into the high 32 bits and OR it in.
1618       __ sllx(split.first()->as_Register(), 32, L5);
1619 
1620       const Register d = dst.first()->as_Register();
1621       __ or3(L5, d, d);
1622     }
1623   } else {
1624     // For LP64 we can probably do better.
1625     split_long_move(masm, src, dst);
1626   }
1627 }
1628 
1629 // A double move
1630 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1631 
1632   // The painful thing here is that like long_move a VMRegPair might be
1633   // 1: a single physical register
1634   // 2: two physical registers (v8)
1635   // 3: a physical reg [lo] and a stack slot [hi] (v8)
1636   // 4: two stack slots
1637 
1638   // Since src is always a java calling convention we know that the src pair
1639   // is always either all registers or all stack (and aligned?)
1640 
1641   // in a register [lo] and a stack slot [hi]
1642   if (src.first()->is_stack()) {
1643     if (dst.first()->is_stack()) {
1644       // stack to stack the easiest of the bunch
1645       // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1646       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1647       __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1648       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1649       __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1650     } else {
1651       // stack to reg
1652       if (dst.second()->is_stack()) {
1653         // stack -> reg, stack -> stack
1654         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1655         if (dst.first()->is_Register()) {
1656           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1657         } else {
1658           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1659         }
1660         // This was missing. (very rare case)
1661         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1662       } else {
1663         // stack -> reg
1664         // Eventually optimize for alignment QQQ
1665         if (dst.first()->is_Register()) {
1666           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1667           __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1668         } else {
1669           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1670           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1671         }
1672       }
1673     }
1674   } else if (dst.first()->is_stack()) {
1675     // reg to stack
1676     if (src.first()->is_Register()) {
1677       // Eventually optimize for alignment QQQ
1678       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1679       if (src.second()->is_stack()) {
1680         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1681         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1682       } else {
1683         __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1684       }
1685     } else {
1686       // fpr to stack
1687       if (src.second()->is_stack()) {
1688         ShouldNotReachHere();
1689       } else {
1690         // Is the stack aligned?
1691         if (reg2offset(dst.first()) & 0x7) {
          // No; do as a pair of single stores
1693           __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1694           __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1695         } else {
1696           __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1697         }
1698       }
1699     }
1700   } else {
1701     // reg to reg
1702     if (src.first()->is_Register()) {
1703       if (dst.first()->is_Register()) {
1704         // gpr -> gpr
1705         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1706         __ mov(src.second()->as_Register(), dst.second()->as_Register());
1707       } else {
1708         // gpr -> fpr
1709         // ought to be able to do a single store
        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1712         // ought to be able to do a single load
1713         __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1714         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1715       }
1716     } else if (dst.first()->is_Register()) {
1717       // fpr -> gpr
1718       // ought to be able to do a single store
1719       __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1720       // ought to be able to do a single load
1721       // REMEMBER first() is low address not LSB
1722       __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1723       if (dst.second()->is_Register()) {
1724         __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1725       } else {
1726         __ ld(FP, -4 + STACK_BIAS, L4);
1727         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1728       }
1729     } else {
1730       // fpr -> fpr
1731       // In theory these overlap but the ordering is such that this is likely a nop
1732       if ( src.first() != dst.first()) {
1733         __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1734       }
1735     }
1736   }
1737 }
1738 
1739 // Creates an inner frame if one hasn't already been created, and
1740 // saves a copy of the thread in L7_thread_cache
1741 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1742   if (!*already_created) {
1743     __ save_frame(0);
1744     // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1745     // Don't use save_thread because it smashes G2 and we merely want to save a
1746     // copy
1747     __ mov(G2_thread, L7_thread_cache);
1748     *already_created = true;
1749   }
1750 }
1751 
1752 // ---------------------------------------------------------------------------
1753 // Generate a native wrapper for a given method.  The method takes arguments
1754 // in the Java compiled code convention, marshals them to the native
1755 // convention (handlizes oops, etc), transitions to native, makes the call,
1756 // returns to java state (possibly blocking), unhandlizes any result and
1757 // returns.
1758 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1759                                                 methodHandle method,
1760                                                 int compile_id,
1761                                                 int total_in_args,
1762                                                 int comp_args_on_stack, // in VMRegStackSlots
1763                                                 BasicType *in_sig_bt,
1764                                                 VMRegPair *in_regs,
1765                                                 BasicType ret_type) {
1766 
  // Native nmethod wrappers never take possession of the oop arguments.
1768   // So the caller will gc the arguments. The only thing we need an
1769   // oopMap for is if the call is static
1770   //
1771   // An OopMap for lock (and class if static), and one for the VM call itself
1772   OopMapSet *oop_maps = new OopMapSet();
1773   intptr_t start = (intptr_t)__ pc();
1774 
1775   // First thing make an ic check to see if we should even be here
1776   {
1777     Label L;
1778     const Register temp_reg = G3_scratch;
1779     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1780     __ verify_oop(O0);
1781     __ load_klass(O0, temp_reg);
1782     __ cmp(temp_reg, G5_inline_cache_reg);
1783     __ brx(Assembler::equal, true, Assembler::pt, L);
1784     __ delayed()->nop();
1785 
1786     __ jump_to(ic_miss, temp_reg);
1787     __ delayed()->nop();
1788     __ align(CodeEntryAlignment);
1789     __ bind(L);
1790   }
1791 
1792   int vep_offset = ((intptr_t)__ pc()) - start;
1793 
1794 #ifdef COMPILER1
1795   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1796     // Object.hashCode can pull the hashCode from the header word
1797     // instead of doing a full VM transition once it's been computed.
1798     // Since hashCode is usually polymorphic at call sites we can't do
1799     // this optimization at the call site without a lot of work.
1800     Label slowCase;
1801     Register receiver             = O0;
1802     Register result               = O0;
1803     Register header               = G3_scratch;
1804     Register hash                 = G3_scratch; // overwrite header value with hash value
1805     Register mask                 = G1;         // to get hash field from header
1806 
1807     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
1808     // We depend on hash_mask being at most 32 bits and avoid the use of
1809     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1810     // vm: see markOop.hpp.
1811     __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
1812     __ sethi(markOopDesc::hash_mask, mask);
1813     __ btst(markOopDesc::unlocked_value, header);
1814     __ br(Assembler::zero, false, Assembler::pn, slowCase);
1815     if (UseBiasedLocking) {
1816       // Check if biased and fall through to runtime if so
1817       __ delayed()->nop();
1818       __ btst(markOopDesc::biased_lock_bit_in_place, header);
1819       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1820     }
1821     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1822 
1823     // Check for a valid (non-zero) hash code and get its value.
1824 #ifdef _LP64
1825     __ srlx(header, markOopDesc::hash_shift, hash);
1826 #else
1827     __ srl(header, markOopDesc::hash_shift, hash);
1828 #endif
1829     __ andcc(hash, mask, hash);
1830     __ br(Assembler::equal, false, Assembler::pn, slowCase);
1831     __ delayed()->nop();
1832 
1833     // leaf return.
1834     __ retl();
1835     __ delayed()->mov(hash, result);
1836     __ bind(slowCase);
1837   }
1838 #endif // COMPILER1
1839 
1840 
  // We have received a description of where all the java args are located
1842   // on entry to the wrapper. We need to convert these args to where
1843   // the jni function will expect them. To figure out where they go
1844   // we convert the java signature to a C signature by inserting
1845   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1846 
1847   int total_c_args = total_in_args + 1;
1848   if (method->is_static()) {
1849     total_c_args++;
1850   }
1851 
1852   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1853   VMRegPair  * out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
1854 
1855   int argc = 0;
1856   out_sig_bt[argc++] = T_ADDRESS;
1857   if (method->is_static()) {
1858     out_sig_bt[argc++] = T_OBJECT;
1859   }
1860 
1861   for (int i = 0; i < total_in_args ; i++ ) {
1862     out_sig_bt[argc++] = in_sig_bt[i];
1863   }
1864 
1865   // Now figure out where the args must be stored and how much stack space
1866   // they require (neglecting out_preserve_stack_slots but space for storing
  // the 1st six register arguments). It's weird; see int_stk_helper.
1868   //
1869   int out_arg_slots;
1870   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1871 
1872   // Compute framesize for the wrapper.  We need to handlize all oops in
1873   // registers. We must create space for them here that is disjoint from
1874   // the windowed save area because we have no control over when we might
1875   // flush the window again and overwrite values that gc has since modified.
1876   // (The live window race)
1877   //
  // We always just allocate 6 words for storing down these objects. This
  // allows us to simply record the base and use the Ireg number to decide
  // which slot to use. (Note that the reg number is the inbound number,
  // not the outbound number).
1882   // We must shuffle args to match the native convention, and include var-args space.
1883 
1884   // Calculate the total number of stack slots we will need.
1885 
1886   // First count the abi requirement plus all of the outgoing args
1887   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1888 
1889   // Now the space for the inbound oop handle area
1890 
1891   int oop_handle_offset = stack_slots;
1892   stack_slots += 6*VMRegImpl::slots_per_word;
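  // (six word-sized slots: one per inbound register argument I0..I5)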
1893 
1894   // Now any space we need for handlizing a klass if static method
1895 
1896   int oop_temp_slot_offset = 0;
1897   int klass_slot_offset = 0;
1898   int klass_offset = -1;
1899   int lock_slot_offset = 0;
1900   bool is_static = false;
1901 
1902   if (method->is_static()) {
1903     klass_slot_offset = stack_slots;
1904     stack_slots += VMRegImpl::slots_per_word;
1905     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1906     is_static = true;
1907   }
1908 
1909   // Plus a lock if needed
1910 
1911   if (method->is_synchronized()) {
1912     lock_slot_offset = stack_slots;
1913     stack_slots += VMRegImpl::slots_per_word;
1914   }
1915 
1916   // Now a place to save return value or as a temporary for any gpr -> fpr moves
1917   stack_slots += 2;
1918 
1919   // Ok The space we have allocated will look like:
1920   //
1921   //
1922   // FP-> |                     |
1923   //      |---------------------|
1924   //      | 2 slots for moves   |
1925   //      |---------------------|
1926   //      | lock box (if sync)  |
1927   //      |---------------------| <- lock_slot_offset
1928   //      | klass (if static)   |
1929   //      |---------------------| <- klass_slot_offset
1930   //      | oopHandle area      |
1931   //      |---------------------| <- oop_handle_offset
1932   //      | outbound memory     |
1933   //      | based arguments     |
1934   //      |                     |
1935   //      |---------------------|
1936   //      | vararg area         |
1937   //      |---------------------|
1938   //      |                     |
1939   // SP-> | out_preserved_slots |
1940   //
1941   //
1942 
1943 
1944   // Now compute actual number of stack words we need rounding to make
1945   // stack properly aligned.
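  // (2 * slots_per_word slots is 16 bytes on LP64 and 8 on a 32-bit VM,
  //  matching the stack alignment the ABI expects)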
1946   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1947 
1948   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1949 
1950   // Generate stack overflow check before creating frame
1951   __ generate_stack_overflow_check(stack_size);
1952 
1953   // Generate a new frame for the wrapper.
1954   __ save(SP, -stack_size, SP);
1955 
1956   int frame_complete = ((intptr_t)__ pc()) - start;
1957 
1958   __ verify_thread();
1959 
1960 
1961   //
  // We immediately shuffle the arguments so that for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.
1966 
1967   // -----------------
1968   // The Grand Shuffle
1969   //
1970   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1971   // (derived from JavaThread* which is in L7_thread_cache) and, if static,
1972   // the class mirror instead of a receiver.  This pretty much guarantees that
1973   // register layout will not match.  We ignore these extra arguments during
1974   // the shuffle. The shuffle is described by the two calling convention
1975   // vectors we have in our possession. We simply walk the java vector to
1976   // get the source locations and the c vector to get the destinations.
1977   // Because we have a new window and the argument registers are completely
1978   // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
1979   // here.
1980 
1981   // This is a trick. We double the stack slots so we can claim
1982   // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
1984   // sure we can capture all the incoming oop args from the
1985   // caller.
1986   //
1987   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1988   int c_arg = total_c_args - 1;
1989   // Record sp-based slot for receiver on stack for non-static methods
1990   int receiver_offset = -1;
1991 
1992   // We move the arguments backward because the floating point registers
1993   // destination will always be to a register with a greater or equal register
1994   // number or the stack.
1995 
1996 #ifdef ASSERT
1997   bool reg_destroyed[RegisterImpl::number_of_registers];
1998   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1999   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2000     reg_destroyed[r] = false;
2001   }
2002   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2003     freg_destroyed[f] = false;
2004   }
2005 
2006 #endif /* ASSERT */
2007 
2008   for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
2009 
2010 #ifdef ASSERT
2011     if (in_regs[i].first()->is_Register()) {
2012       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2013     } else if (in_regs[i].first()->is_FloatRegister()) {
2014       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2015     }
2016     if (out_regs[c_arg].first()->is_Register()) {
2017       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2018     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2019       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2020     }
2021 #endif /* ASSERT */
2022 
2023     switch (in_sig_bt[i]) {
2024       case T_ARRAY:
2025       case T_OBJECT:
2026         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2027                     ((i == 0) && (!is_static)),
2028                     &receiver_offset);
2029         break;
2030       case T_VOID:
2031         break;
2032 
2033       case T_FLOAT:
2034         float_move(masm, in_regs[i], out_regs[c_arg]);
        break;
2036 
2037       case T_DOUBLE:
2038         assert( i + 1 < total_in_args &&
2039                 in_sig_bt[i + 1] == T_VOID &&
2040                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2041         double_move(masm, in_regs[i], out_regs[c_arg]);
2042         break;
2043 
2044       case T_LONG :
2045         long_move(masm, in_regs[i], out_regs[c_arg]);
2046         break;
2047 
2048       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
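        // (no break: in product builds the assert vanishes and this falls
        // through to the default move below)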
2049 
2050       default:
2051         move32_64(masm, in_regs[i], out_regs[c_arg]);
2052     }
2053   }
2054 
2055   // Pre-load a static method's oop into O1.  Used both by locking code and
2056   // the normal JNI call code.
2057   if (method->is_static()) {
2058     __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
2059 
2060     // Now handlize the static class mirror in O1.  It's known not-null.
2061     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2062     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2063     __ add(SP, klass_offset + STACK_BIAS, O1);
2064   }
2065 
2066 
2067   const Register L6_handle = L6;
2068 
2069   if (method->is_synchronized()) {
2070     __ mov(O1, L6_handle);
2071   }
2072 
2073   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
2074   // except O6/O7. So if we must call out we must push a new frame. We immediately
2075   // push a new frame and flush the windows.
2076 
2077 #ifdef _LP64
2078   intptr_t thepc = (intptr_t) __ pc();
2079   {
2080     address here = __ pc();
2081     // Call the next instruction
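    // (the call writes its own pc into O7; targeting pc+8, the instruction
    // after the delay slot, captures the current pc without going anywhere)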
2082     __ call(here + 8, relocInfo::none);
2083     __ delayed()->nop();
2084   }
2085 #else
2086   intptr_t thepc = __ load_pc_address(O7, 0);
2087 #endif /* _LP64 */
2088 
2089   // We use the same pc/oopMap repeatedly when we call out
2090   oop_maps->add_gc_map(thepc - start, map);
2091 
2092   // O7 now has the pc loaded that we will use when we finally call to native.
2093 
2094   // Save thread in L7; it crosses a bunch of VM calls below
2095   // Don't use save_thread because it smashes G2 and we merely
2096   // want to save a copy
2097   __ mov(G2_thread, L7_thread_cache);
2098 
2099 
  // If we create an inner frame, once is plenty;
  // when we create it we must also save G2_thread
2102   bool inner_frame_created = false;
2103 
2104   // dtrace method entry support
2105   {
2106     SkipIfEqual skip_if(
2107       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2108     // create inner frame
2109     __ save_frame(0);
2110     __ mov(G2_thread, L7_thread_cache);
2111     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2112     __ call_VM_leaf(L7_thread_cache,
2113          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2114          G2_thread, O1);
2115     __ restore();
2116   }
2117 
2118   // RedefineClasses() tracing support for obsolete method entry
2119   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2120     // create inner frame
2121     __ save_frame(0);
2122     __ mov(G2_thread, L7_thread_cache);
2123     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2124     __ call_VM_leaf(L7_thread_cache,
2125          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2126          G2_thread, O1);
2127     __ restore();
2128   }
2129 
  // We are in the jni frame unless inner_frame_created is true, in which
  // case we are one frame deeper (the "inner" frame). In the "inner"
  // frame the args are in the Iregs; in the jni frame they are in the
  // Oregs.
2134   // If we ever need to go to the VM (for locking, jvmti) then
2135   // we will always be in the "inner" frame.
2136 
2137   // Lock a synchronized method
2138   int lock_offset = -1;         // Set if locked
2139   if (method->is_synchronized()) {
2140     Register Roop = O1;
2141     const Register L3_box = L3;
2142 
2143     create_inner_frame(masm, &inner_frame_created);
2144 
2145     __ ld_ptr(I1, 0, O1);
2146     Label done;
2147 
2148     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2149     __ add(FP, lock_offset+STACK_BIAS, L3_box);
2150 #ifdef ASSERT
2151     if (UseBiasedLocking) {
2152       // making the box point to itself will make it clear it went unused
2153       // but also be obviously invalid
2154       __ st_ptr(L3_box, L3_box, 0);
2155     }
2156 #endif // ASSERT
2157     //
2158     // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2159     //
2160     __ compiler_lock_object(Roop, L1,    L3_box, L2);
2161     __ br(Assembler::equal, false, Assembler::pt, done);
2162     __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2163 
2164 
2165     // None of the above fast optimizations worked so we have to get into the
2166     // slow case of monitor enter.  Inline a special case of call_VM that
2167     // disallows any pending_exception.
2168     __ mov(Roop, O0);            // Need oop in O0
2169     __ mov(L3_box, O1);
2170 
2171     // Record last_Java_sp, in case the VM code releases the JVM lock.
2172 
2173     __ set_last_Java_frame(FP, I7);
2174 
2175     // do the call
2176     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2177     __ delayed()->mov(L7_thread_cache, O2);
2178 
2179     __ restore_thread(L7_thread_cache); // restore G2_thread
2180     __ reset_last_Java_frame();
2181 
2182 #ifdef ASSERT
2183     { Label L;
2184     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2185     __ br_null(O0, false, Assembler::pt, L);
2186     __ delayed()->nop();
2187     __ stop("no pending exception allowed on exit from IR::monitorenter");
2188     __ bind(L);
2189     }
2190 #endif
2191     __ bind(done);
2192   }
2193 
2194 
2195   // Finally just about ready to make the JNI call
2196 
2197   __ flush_windows();
2198   if (inner_frame_created) {
2199     __ restore();
2200   } else {
2201     // Store only what we need from this frame
2202     // QQQ I think that non-v9 (like we care) we don't need these saves
2203     // either as the flush traps and the current window goes too.
2204     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2205     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2206   }
2207 
2208   // get JNIEnv* which is first argument to native
2209 
2210   __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2211 
2212   // Use that pc we placed in O7 a while back as the current frame anchor
2213 
2214   __ set_last_Java_frame(SP, O7);
2215 
2216   // Transition from _thread_in_Java to _thread_in_native.
2217   __ set(_thread_in_native, G3_scratch);
2218   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2219 
  // We flushed the windows ages ago; now mark them as flushed.
  __ set(JavaFrameAnchor::flushed, G3_scratch);
2224 
2225   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2226 
2227 #ifdef _LP64
2228   AddressLiteral dest(method->native_function());
2229   __ relocate(relocInfo::runtime_call_type);
2230   __ jumpl_to(dest, O7, O7);
2231 #else
2232   __ call(method->native_function(), relocInfo::runtime_call_type);
2233 #endif
2234   __ delayed()->st(G3_scratch, flags);
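  // The delay-slot store above publishes JavaFrameAnchor::flushed before
  // control reaches the native function.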
2235 
2236   __ restore_thread(L7_thread_cache); // restore G2_thread
2237 
2238   // Unpack native results.  For int-types, we do any needed sign-extension
2239   // and move things into I0.  The return value there will survive any VM
2240   // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2241   // specially in the slow-path code.
2242   switch (ret_type) {
2243   case T_VOID:    break;        // Nothing to do!
2244   case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2245   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
2247   case T_LONG:
2248 #ifndef _LP64
2249                   __ mov(O1, I1);
2250 #endif
2251                   // Fall thru
2252   case T_OBJECT:                // Really a handle
2253   case T_ARRAY:
2254   case T_INT:
2255                   __ mov(O0, I0);
2256                   break;
2257   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2258   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2259   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2260   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
    // (Object/array results cannot be de-handlized until after reclaiming jvm_lock)
2262   default:
2263     ShouldNotReachHere();
2264   }
2265 
2266   // must we block?
2267 
2268   // Block, if necessary, before resuming in _thread_in_Java state.
2269   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2270   { Label no_block;
2271     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2272 
2273     // Switch thread to "native transition" state before reading the synchronization state.
2274     // This additional state is necessary because reading and testing the synchronization
2275     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2276     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2277     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2278     //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
2280     __ set(_thread_in_native_trans, G3_scratch);
2281     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2282     if(os::is_MP()) {
2283       if (UseMembar) {
2284         // Force this write out before the read below
2285         __ membar(Assembler::StoreLoad);
2286       } else {
2287         // Write serialization page so VM thread can do a pseudo remote membar.
2288         // We use the current thread pointer to calculate a thread specific
2289         // offset to write to within the page. This minimizes bus traffic
2290         // due to cache line collision.
2291         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2292       }
2293     }
2294     __ load_contents(sync_state, G3_scratch);
2295     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2296 
2297     Label L;
2298     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2299     __ br(Assembler::notEqual, false, Assembler::pn, L);
2300     __ delayed()->ld(suspend_state, G3_scratch);
2301     __ cmp(G3_scratch, 0);
2302     __ br(Assembler::equal, false, Assembler::pt, no_block);
2303     __ delayed()->nop();
2304     __ bind(L);
2305 
2306     // Block.  Save any potential method result value before the operation and
2307     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
2309     // a distinct one for this pc
2310     //
2311     save_native_result(masm, ret_type, stack_slots);
2312     __ call_VM_leaf(L7_thread_cache,
2313                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2314                     G2_thread);
2315 
2316     // Restore any method result value
2317     restore_native_result(masm, ret_type, stack_slots);
2318     __ bind(no_block);
2319   }
2320 
2321   // thread state is thread_in_native_trans. Any safepoint blocking has already
2322   // happened so we can now change state to _thread_in_Java.
2323 
2324 
2325   __ set(_thread_in_Java, G3_scratch);
2326   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2327 
2328 
2329   Label no_reguard;
2330   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2331   __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2332   __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2333   __ delayed()->nop();
2334 
  save_native_result(masm, ret_type, stack_slots);
2336   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2337   __ delayed()->nop();
2338 
2339   __ restore_thread(L7_thread_cache); // restore G2_thread
  restore_native_result(masm, ret_type, stack_slots);
2341 
2342   __ bind(no_reguard);
2343 
2344   // Handle possible exception (will unlock if necessary)
2345 
2346   // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2347 
2348   // Unlock
2349   if (method->is_synchronized()) {
2350     Label done;
2351     Register I2_ex_oop = I2;
2352     const Register L3_box = L3;
2353     // Get locked oop from the handle we passed to jni
2354     __ ld_ptr(L6_handle, 0, L4);
2355     __ add(SP, lock_offset+STACK_BIAS, L3_box);
2356     // Must save pending exception around the slow-path VM call.  Since it's a
2357     // leaf call, the pending exception (if any) can be kept in a register.
2358     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2359     // Now unlock
2360     //                       (Roop, Rmark, Rbox,   Rscratch)
2361     __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2362     __ br(Assembler::equal, false, Assembler::pt, done);
2363     __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2364 
2365     // save and restore any potential method result value around the unlocking
2366     // operation.  Will save in I0 (or stack for FP returns).
2367     save_native_result(masm, ret_type, stack_slots);
2368 
2369     // Must clear pending-exception before re-entering the VM.  Since this is
2370     // a leaf call, pending-exception-oop can be safely kept in a register.
2371     __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2372 
    // slow case of monitor exit.  Inline a special case of call_VM that
2374     // disallows any pending_exception.
2375     __ mov(L3_box, O1);
2376 
2377     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2378     __ delayed()->mov(L4, O0);              // Need oop in O0
2379 
2380     __ restore_thread(L7_thread_cache); // restore G2_thread
2381 
2382 #ifdef ASSERT
2383     { Label L;
2384     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2385     __ br_null(O0, false, Assembler::pt, L);
2386     __ delayed()->nop();
2387     __ stop("no pending exception allowed on exit from IR::monitorexit");
2388     __ bind(L);
2389     }
2390 #endif
2391     restore_native_result(masm, ret_type, stack_slots);
    // check_forward_pending_exception jumps to forward_exception if any pending
2393     // exception is set.  The forward_exception routine expects to see the
2394     // exception in pending_exception and not in a register.  Kind of clumsy,
2395     // since all folks who branch to forward_exception must have tested
2396     // pending_exception first and hence have it in a register already.
2397     __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2398     __ bind(done);
2399   }
2400 
2401   // Tell dtrace about this method exit
2402   {
2403     SkipIfEqual skip_if(
2404       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2405     save_native_result(masm, ret_type, stack_slots);
2406     __ set_oop_constant(JNIHandles::make_local(method()), O1);
2407     __ call_VM_leaf(L7_thread_cache,
2408        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2409        G2_thread, O1);
2410     restore_native_result(masm, ret_type, stack_slots);
2411   }
2412 
2413   // Clear "last Java frame" SP and PC.
2414   __ verify_thread(); // G2_thread must be correct
2415   __ reset_last_Java_frame();
2416 
2417   // Unpack oop result
2418   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2419       Label L;
2420       __ addcc(G0, I0, G0);
2421       __ brx(Assembler::notZero, true, Assembler::pt, L);
2422       __ delayed()->ld_ptr(I0, 0, I0);
2423       __ mov(G0, I0);
2424       __ bind(L);
2425       __ verify_oop(I0);
2426   }
2427 
2428   // reset handle block
2429   __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2430   __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2431 
2432   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2433   check_forward_pending_exception(masm, G3_scratch);
2434 
2435 
2436   // Return
2437 
2438 #ifndef _LP64
2439   if (ret_type == T_LONG) {
2440 
2441     // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2442     __ sllx(I0, 32, G1);          // Shift bits into high G1
2443     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
2444     __ or3 (I1, G1, G1);          // OR 64 bits into G1
2445   }
2446 #endif
2447 
2448   __ ret();
2449   __ delayed()->restore();
2450 
2451   __ flush();
2452 
2453   nmethod *nm = nmethod::new_native_nmethod(method,
2454                                             compile_id,
2455                                             masm->code(),
2456                                             vep_offset,
2457                                             frame_complete,
2458                                             stack_slots / VMRegImpl::slots_per_word,
2459                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2460                                             in_ByteSize(lock_offset),
2461                                             oop_maps);
2462   return nm;
2463 
2464 }
2465 
2466 #ifdef HAVE_DTRACE_H
2467 // ---------------------------------------------------------------------------
2468 // Generate a dtrace nmethod for a given signature.  The method takes arguments
2469 // in the Java compiled code convention, marshals them to the native
2470 // abi and then leaves nops at the position you would expect to call a native
2471 // function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trap will cause a notification
2473 // to dtrace.
2474 //
2475 // The probes are only able to take primitive types and java/lang/String as
2476 // arguments.  No other java types are allowed. Strings are converted to utf8
2477 // strings so that from dtrace point of view java strings are converted to C
2478 // strings. There is an arbitrary fixed limit on the total space that a method
2479 // can use for converting the strings. (256 chars per string in the signature).
// So any java string larger than this is truncated.
2481 
2482 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2483 static bool offsets_initialized = false;
2484 
2485 static VMRegPair reg64_to_VMRegPair(Register r) {
2486   VMRegPair ret;
2487   if (wordSize == 8) {
2488     ret.set2(r->as_VMReg());
2489   } else {
2490     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2491   }
2492   return ret;
2493 }
2494 
2495 
2496 nmethod *SharedRuntime::generate_dtrace_nmethod(
2497     MacroAssembler *masm, methodHandle method) {
2498 
2499 
2500   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2501   // be single threaded in this method.
2502   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2503 
2504   // Fill in the signature array, for the calling-convention call.
2505   int total_args_passed = method->size_of_parameters();
2506 
2507   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2508   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2509 
  // The signature we are going to use for the trap that dtrace will see:
  // java/lang/String is converted, we drop "this" and any other object
2512   // is converted to NULL.  (A one-slot java/lang/Long object reference
2513   // is converted to a two-slot long, which is why we double the allocation).
2514   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2515   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2516 
2517   int i=0;
2518   int total_strings = 0;
2519   int first_arg_to_pass = 0;
2520   int total_c_args = 0;
2521 
2522   // Skip the receiver as dtrace doesn't want to see it
2523   if( !method->is_static() ) {
2524     in_sig_bt[i++] = T_OBJECT;
2525     first_arg_to_pass = 1;
2526   }
2527 
2528   SignatureStream ss(method->signature());
2529   for ( ; !ss.at_return_type(); ss.next()) {
2530     BasicType bt = ss.type();
2531     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
2532     out_sig_bt[total_c_args++] = bt;
2533     if( bt == T_OBJECT) {
2534       Symbol* s = ss.as_symbol_or_null();
2535       if (s == vmSymbols::java_lang_String()) {
2536         total_strings++;
2537         out_sig_bt[total_c_args-1] = T_ADDRESS;
2538       } else if (s == vmSymbols::java_lang_Boolean() ||
2539                  s == vmSymbols::java_lang_Byte()) {
2540         out_sig_bt[total_c_args-1] = T_BYTE;
2541       } else if (s == vmSymbols::java_lang_Character() ||
2542                  s == vmSymbols::java_lang_Short()) {
2543         out_sig_bt[total_c_args-1] = T_SHORT;
2544       } else if (s == vmSymbols::java_lang_Integer() ||
2545                  s == vmSymbols::java_lang_Float()) {
2546         out_sig_bt[total_c_args-1] = T_INT;
2547       } else if (s == vmSymbols::java_lang_Long() ||
2548                  s == vmSymbols::java_lang_Double()) {
2549         out_sig_bt[total_c_args-1] = T_LONG;
2550         out_sig_bt[total_c_args++] = T_VOID;
2551       }
2552     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2553       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2554       // We convert double to long
2555       out_sig_bt[total_c_args-1] = T_LONG;
2556       out_sig_bt[total_c_args++] = T_VOID;
2557     } else if ( bt == T_FLOAT) {
2558       // We convert float to int
2559       out_sig_bt[total_c_args-1] = T_INT;
2560     }
2561   }
2562 
2563   assert(i==total_args_passed, "validly parsed signature");
2564 
2565   // Now get the compiled-Java layout as input arguments
2566   int comp_args_on_stack;
2567   comp_args_on_stack = SharedRuntime::java_calling_convention(
2568       in_sig_bt, in_regs, total_args_passed, false);
2569 
  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // a native (non-jni) function would expect them. To figure out
2573   // where they go we convert the java signature to a C signature and remove
2574   // T_VOID for any long/double we might have received.
2575 
2576 
  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but including space
  // for storing the first six register arguments). It's weird; see
  // int_stk_helper.
2580   //
2581   int out_arg_slots;
2582   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2583 
2584   // Calculate the total number of stack slots we will need.
2585 
2586   // First count the abi requirement plus all of the outgoing args
2587   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2588 
  // Plus a temp for possible conversion of float/double/long register args
2590 
2591   int conversion_temp = stack_slots;
2592   stack_slots += 2;
2593 
2594 
2595   // Now space for the string(s) we must convert
2596 
2597   int string_locs = stack_slots;
2598   stack_slots += total_strings *
2599                    (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2600 
  // OK, the space we have allocated will look like:
2602   //
2603   //
2604   // FP-> |                     |
2605   //      |---------------------|
2606   //      | string[n]           |
2607   //      |---------------------| <- string_locs[n]
2608   //      | string[n-1]         |
2609   //      |---------------------| <- string_locs[n-1]
2610   //      | ...                 |
2611   //      | ...                 |
2612   //      |---------------------| <- string_locs[1]
2613   //      | string[0]           |
2614   //      |---------------------| <- string_locs[0]
2615   //      | temp                |
2616   //      |---------------------| <- conversion_temp
2617   //      | outbound memory     |
2618   //      | based arguments     |
2619   //      |                     |
2620   //      |---------------------|
2621   //      |                     |
2622   // SP-> | out_preserved_slots |
2623   //
2624   //
2625 
  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
2628   stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2629 
2630   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
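
  // To make the arithmetic concrete (the counts below are hypothetical): on
  // a 64-bit build VMRegImpl::slots_per_word == 2, so with, say, 32 preserved
  // slots, 12 outgoing-arg slots, the 2 temp slots and one string buffer of
  // max_dtrace_string_size == 256 bytes (64 slots), stack_slots is
  // 32 + 12 + 2 + 64 == 110, which rounds up to 112 slots == 448 bytes.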
2631 
2632   intptr_t start = (intptr_t)__ pc();
2633 
2634   // First thing make an ic check to see if we should even be here
2635 
2636   {
2637     Label L;
2638     const Register temp_reg = G3_scratch;
2639     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2640     __ verify_oop(O0);
2641     __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2642     __ cmp(temp_reg, G5_inline_cache_reg);
2643     __ brx(Assembler::equal, true, Assembler::pt, L);
2644     __ delayed()->nop();
2645 
2646     __ jump_to(ic_miss, temp_reg);
2647     __ delayed()->nop();
2648     __ align(CodeEntryAlignment);
2649     __ bind(L);
2650   }
2651 
2652   int vep_offset = ((intptr_t)__ pc()) - start;
2653 
2654 
  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_not_entrant. The stack bang
  // instruction fits that requirement.
2658 
2659   // Generate stack overflow check before creating frame
2660   __ generate_stack_overflow_check(stack_size);
2661 
  assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
         "valid size for make_not_entrant");
2664 
2665   // Generate a new frame for the wrapper.
2666   __ save(SP, -stack_size, SP);
2667 
  // Frame is now complete as far as size and linkage go.
2669 
2670   int frame_complete = ((intptr_t)__ pc()) - start;
2671 
2672 #ifdef ASSERT
2673   bool reg_destroyed[RegisterImpl::number_of_registers];
2674   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2675   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2676     reg_destroyed[r] = false;
2677   }
2678   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2679     freg_destroyed[f] = false;
2680   }
2681 
2682 #endif /* ASSERT */
2683 
2684   VMRegPair zero;
2685   const Register g0 = G0; // without this we get a compiler warning (why??)
2686   zero.set2(g0->as_VMReg());
2687 
2688   int c_arg, j_arg;
2689 
2690   Register conversion_off = noreg;
2691 
2692   for (j_arg = first_arg_to_pass, c_arg = 0 ;
2693        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2694 
2695     VMRegPair src = in_regs[j_arg];
2696     VMRegPair dst = out_regs[c_arg];
2697 
2698 #ifdef ASSERT
2699     if (src.first()->is_Register()) {
2700       assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2701     } else if (src.first()->is_FloatRegister()) {
2702       assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2703                                                FloatRegisterImpl::S)], "ack!");
2704     }
2705     if (dst.first()->is_Register()) {
2706       reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2707     } else if (dst.first()->is_FloatRegister()) {
2708       freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2709                                                  FloatRegisterImpl::S)] = true;
2710     }
2711 #endif /* ASSERT */
2712 
2713     switch (in_sig_bt[j_arg]) {
2714       case T_ARRAY:
2715       case T_OBJECT:
2716         {
2717           if (out_sig_bt[c_arg] == T_BYTE  || out_sig_bt[c_arg] == T_SHORT ||
2718               out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
            // need to unbox the boxed primitive value
2720             Register in_reg = L0;
2721             Register tmp = L2;
2722             if ( src.first()->is_reg() ) {
2723               in_reg = src.first()->as_Register();
2724             } else {
2725               assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2726                      "must be");
2727               __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2728             }
2729             // If the final destination is an acceptable register
2730             if ( dst.first()->is_reg() ) {
2731               if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2732                 tmp = dst.first()->as_Register();
2733               }
2734             }
2735 
2736             Label skipUnbox;
2737             if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2738               __ mov(G0, tmp->successor());
2739             }
2740             __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2741             __ delayed()->mov(G0, tmp);
2742 
2743             BasicType bt = out_sig_bt[c_arg];
2744             int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
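            // box_offset is the offset of the box object's value field,
            // e.g. the offset of java.lang.Integer.value when bt is T_INT.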
2745             switch (bt) {
2746                 case T_BYTE:
2747                   __ ldub(in_reg, box_offset, tmp); break;
2748                 case T_SHORT:
2749                   __ lduh(in_reg, box_offset, tmp); break;
2750                 case T_INT:
2751                   __ ld(in_reg, box_offset, tmp); break;
2752                 case T_LONG:
2753                   __ ld_long(in_reg, box_offset, tmp); break;
2754                 default: ShouldNotReachHere();
2755             }
2756 
2757             __ bind(skipUnbox);
2758             // If tmp wasn't final destination copy to final destination
2759             if (tmp == L2) {
2760               VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2761               if (out_sig_bt[c_arg] == T_LONG) {
2762                 long_move(masm, tmp_as_VM, dst);
2763               } else {
2764                 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2765               }
2766             }
2767             if (out_sig_bt[c_arg] == T_LONG) {
2768               assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2769               ++c_arg; // move over the T_VOID to keep the loop indices in sync
2770             }
2771           } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2772             Register s =
2773                 src.first()->is_reg() ? src.first()->as_Register() : L2;
2774             Register d =
2775                 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2776 
            // We store the oop now so that the conversion pass can reach it
            // while in the inner frame. This will be the only store if
            // the oop is NULL.
2780             if (s != L2) {
2781               // src is register
2782               if (d != L2) {
2783                 // dst is register
2784                 __ mov(s, d);
2785               } else {
2786                 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2787                           STACK_BIAS), "must be");
2788                 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2789               }
2790             } else {
2791                 // src not a register
2792                 assert(Assembler::is_simm13(reg2offset(src.first()) +
2793                            STACK_BIAS), "must be");
2794                 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2795                 if (d == L2) {
2796                   assert(Assembler::is_simm13(reg2offset(dst.first()) +
2797                              STACK_BIAS), "must be");
2798                   __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2799                 }
2800             }
2801           } else if (out_sig_bt[c_arg] != T_VOID) {
2802             // Convert the arg to NULL
2803             if (dst.first()->is_reg()) {
2804               __ mov(G0, dst.first()->as_Register());
2805             } else {
2806               assert(Assembler::is_simm13(reg2offset(dst.first()) +
2807                          STACK_BIAS), "must be");
2808               __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2809             }
2810           }
2811         }
2812         break;
2813       case T_VOID:
2814         break;
2815 
2816       case T_FLOAT:
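        // SPARC has no direct move between the float and integer register
        // files, so a float living in a register is bounced through the
        // conversion_temp stack slot (stored as a float, reloaded as an int).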
2817         if (src.first()->is_stack()) {
2818           // Stack to stack/reg is simple
2819           move32_64(masm, src, dst);
2820         } else {
2821           if (dst.first()->is_reg()) {
2822             // freg -> reg
2823             int off =
2824               STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2825             Register d = dst.first()->as_Register();
2826             if (Assembler::is_simm13(off)) {
2827               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2828                      SP, off);
2829               __ ld(SP, off, d);
2830             } else {
2831               if (conversion_off == noreg) {
2832                 __ set(off, L6);
2833                 conversion_off = L6;
2834               }
2835               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2836                      SP, conversion_off);
              __ ld(SP, conversion_off, d);
2838             }
2839           } else {
2840             // freg -> mem
2841             int off = STACK_BIAS + reg2offset(dst.first());
2842             if (Assembler::is_simm13(off)) {
2843               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2844                      SP, off);
2845             } else {
2846               if (conversion_off == noreg) {
2847                 __ set(off, L6);
2848                 conversion_off = L6;
2849               }
2850               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2851                      SP, conversion_off);
2852             }
2853           }
2854         }
2855         break;
2856 
2857       case T_DOUBLE:
2858         assert( j_arg + 1 < total_args_passed &&
2859                 in_sig_bt[j_arg + 1] == T_VOID &&
2860                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2861         if (src.first()->is_stack()) {
2862           // Stack to stack/reg is simple
2863           long_move(masm, src, dst);
2864         } else {
2865           Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2866 
          // Destination could be an odd reg on 32bit in which case
          // we can't load directly into the destination.
2869 
2870           if (!d->is_even() && wordSize == 4) {
2871             d = L2;
2872           }
2873           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2874           if (Assembler::is_simm13(off)) {
2875             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2876                    SP, off);
2877             __ ld_long(SP, off, d);
2878           } else {
2879             if (conversion_off == noreg) {
2880               __ set(off, L6);
2881               conversion_off = L6;
2882             }
2883             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2884                    SP, conversion_off);
2885             __ ld_long(SP, conversion_off, d);
2886           }
2887           if (d == L2) {
2888             long_move(masm, reg64_to_VMRegPair(L2), dst);
2889           }
2890         }
2891         break;
2892 
2893       case T_LONG :
2894         // 32bit can't do a split move of something like g1 -> O0, O1
2895         // so use a memory temp
2896         if (src.is_single_phys_reg() && wordSize == 4) {
2897           Register tmp = L2;
2898           if (dst.first()->is_reg() &&
2899               (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2900             tmp = dst.first()->as_Register();
2901           }
2902 
2903           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2904           if (Assembler::is_simm13(off)) {
2905             __ stx(src.first()->as_Register(), SP, off);
2906             __ ld_long(SP, off, tmp);
2907           } else {
2908             if (conversion_off == noreg) {
2909               __ set(off, L6);
2910               conversion_off = L6;
2911             }
2912             __ stx(src.first()->as_Register(), SP, conversion_off);
2913             __ ld_long(SP, conversion_off, tmp);
2914           }
2915 
2916           if (tmp == L2) {
2917             long_move(masm, reg64_to_VMRegPair(L2), dst);
2918           }
2919         } else {
2920           long_move(masm, src, dst);
2921         }
2922         break;
2923 
2924       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
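        // asserts are compiled out of product builds, so this falls through
        // to the default move below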
2925 
2926       default:
2927         move32_64(masm, src, dst);
2928     }
2929   }
2930 
2931 
  // If we have any strings we must store any register based arg to the stack.
  // This includes any still live float registers too.
2934 
2935   if (total_strings > 0 ) {
2936 
2937     // protect all the arg registers
2938     __ save_frame(0);
2939     __ mov(G2_thread, L7_thread_cache);
2940     const Register L2_string_off = L2;
2941 
2942     // Get first string offset
2943     __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2944 
2945     for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2946       if (out_sig_bt[c_arg] == T_ADDRESS) {
2947 
2948         VMRegPair dst = out_regs[c_arg];
2949         const Register d = dst.first()->is_reg() ?
2950             dst.first()->as_Register()->after_save() : noreg;
2951 
        // It's a string; the oop was already copied to the out arg
        // position
2954         if (d != noreg) {
2955           __ mov(d, O0);
2956         } else {
2957           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2958                  "must be");
2959           __ ld_ptr(FP,  reg2offset(dst.first()) + STACK_BIAS, O0);
2960         }
2961         Label skip;
2962 
2963         __ br_null(O0, false, Assembler::pn, skip);
2964         __ delayed()->add(FP, L2_string_off, O1);
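        // The branch delay slot is not annulled, so the add executes on both
        // paths and O1 holds the address of the current string buffer.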
2965 
2966         if (d != noreg) {
2967           __ mov(O1, d);
2968         } else {
2969           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2970                  "must be");
2971           __ st_ptr(O1, FP,  reg2offset(dst.first()) + STACK_BIAS);
2972         }
2973 
2974         __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
2975                 relocInfo::runtime_call_type);
2976         __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
2977 
2978         __ bind(skip);
2979 
2980       }
2981 
2982     }
2983     __ mov(L7_thread_cache, G2_thread);
2984     __ restore();
2985 
2986   }
2987 
2988 
2989   // Ok now we are done. Need to place the nop that dtrace wants in order to
2990   // patch in the trap
2991 
2992   int patch_offset = ((intptr_t)__ pc()) - start;
2993 
2994   __ nop();
2995 
2996 
2997   // Return
2998 
2999   __ ret();
3000   __ delayed()->restore();
3001 
3002   __ flush();
3003 
3004   nmethod *nm = nmethod::new_dtrace_nmethod(
3005       method, masm->code(), vep_offset, patch_offset, frame_complete,
3006       stack_slots / VMRegImpl::slots_per_word);
3007   return nm;
3008 
3009 }
3010 
3011 #endif // HAVE_DTRACE_H
3012 
// This function returns the adjustment size (in number of words) to a c2i
// adapter activation, for use during deoptimization.
3015 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3016   assert(callee_locals >= callee_parameters,
3017           "test and remove; got more parms than locals");
3018   if (callee_locals < callee_parameters)
3019     return 0;                   // No adjustment for negative locals
3020   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3021   return round_to(diff, WordsPerLong);
3022 }
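
// For example (hypothetical counts): a callee with 2 parameters and 5 locals
// needs (5 - 2) * Interpreter::stackElementWords extra words; on a build
// where stackElementWords == 1 that is 3 words, and round_to(3, WordsPerLong)
// pads it to 4 when WordsPerLong == 2, keeping the frame doubleword aligned.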
3023 
3024 // "Top of Stack" slots that may be unused by the calling convention but must
3025 // otherwise be preserved.
3026 // On Intel these are not necessary and the value can be zero.
3027 // On Sparc this describes the words reserved for storing a register window
3028 // when an interrupt occurs.
3029 uint SharedRuntime::out_preserve_stack_slots() {
3030   return frame::register_save_words * VMRegImpl::slots_per_word;
3031 }
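
// For instance, assuming frame::register_save_words == 16 (room for the 8 %i
// and 8 %l registers of a window), this is 16 slots on a 32-bit build and 32
// slots on a 64-bit build, where VMRegImpl::slots_per_word is 1 and 2
// respectively.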
3032 
3033 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  //
  // Common out the new frame generation for deopt and uncommon trap
  //
3037   Register        G3pcs              = G3_scratch; // Array of new pcs (input)
3038   Register        Oreturn0           = O0;
3039   Register        Oreturn1           = O1;
3040   Register        O2UnrollBlock      = O2;
3041   Register        O3array            = O3;         // Array of frame sizes (input)
3042   Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // size of the current frame (scratch)
3044 
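  // Load this frame's size from the size array, negate it, and open a new
  // register window of that size ('save' treats the middle operand as a
  // stack adjustment, so it must be negative to grow the stack).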
3045   __ ld_ptr(O3array, 0, O7frame_size);
3046   __ sub(G0, O7frame_size, O7frame_size);
3047   __ save(SP, O7frame_size, SP);
3048   __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
3049 
#ifdef ASSERT
3051   // make sure that the frames are aligned properly
3052 #ifndef _LP64
3053   __ btst(wordSize*2-1, SP);
3054   __ breakpoint_trap(Assembler::notZero);
3055 #endif
#endif // ASSERT
3057 
3058   // Deopt needs to pass some extra live values from frame to frame
3059 
3060   if (deopt) {
3061     __ mov(Oreturn0->after_save(), Oreturn0);
3062     __ mov(Oreturn1->after_save(), Oreturn1);
3063   }
3064 
3065   __ mov(O4array_size->after_save(), O4array_size);
3066   __ sub(O4array_size, 1, O4array_size);
3067   __ mov(O3array->after_save(), O3array);
3068   __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3069   __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
3070 
#ifdef ASSERT
3072   // trash registers to show a clear pattern in backtraces
3073   __ set(0xDEAD0000, I0);
3074   __ add(I0,  2, I1);
3075   __ add(I0,  4, I2);
3076   __ add(I0,  6, I3);
3077   __ add(I0,  8, I4);
  // Don't touch I5; it could hold the valuable savedSP
3079   __ set(0xDEADBEEF, L0);
3080   __ mov(L0, L1);
3081   __ mov(L0, L2);
3082   __ mov(L0, L3);
3083   __ mov(L0, L4);
3084   __ mov(L0, L5);
3085 
3086   // trash the return value as there is nothing to return yet
3087   __ set(0xDEAD0001, O7);
#endif // ASSERT
3089 
3090   __ mov(SP, O5_savedSP);
3091 }
3092 
3093 
3094 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3095   //
3096   // loop through the UnrollBlock info and create new frames
3097   //
3098   Register        G3pcs              = G3_scratch;
3099   Register        Oreturn0           = O0;
3100   Register        Oreturn1           = O1;
3101   Register        O2UnrollBlock      = O2;
3102   Register        O3array            = O3;
3103   Register        O4array_size       = O4;
3104   Label           loop;
3105 
3106   // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of the stack
3108   if (UseStackBanging) {
3109     // Get total frame size for interpreted frames
3110     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3111     __ bang_stack_size(O4, O3, G3_scratch);
3112   }
3113 
3114   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3115   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3116   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3117 
3118   // Adjust old interpreter frame to make space for new frame's extra java locals
3119   //
3120   // We capture the original sp for the transition frame only because it is needed in
3121   // order to properly calculate interpreter_sp_adjustment. Even though in real life
3122   // every interpreter frame captures a savedSP it is only needed at the transition
3123   // (fortunately). If we had to have it correct everywhere then we would need to
3124   // be told the sp_adjustment for each frame we create. If the frame size array
3125   // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere.
3127   //
3128 
3129   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3130   __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
3131   __ sub(SP, O7, SP);
3132 
3133 #ifdef ASSERT
3134   // make sure that there is at least one entry in the array
3135   __ tst(O4array_size);
3136   __ breakpoint_trap(Assembler::zero);
3137 #endif
3138 
3139   // Now push the new interpreter frames
3140   __ bind(loop);
3141 
3142   // allocate a new frame, filling the registers
3143 
3144   gen_new_frame(masm, deopt);        // allocate an interpreter frame
3145 
3146   __ tst(O4array_size);
3147   __ br(Assembler::notZero, false, Assembler::pn, loop);
3148   __ delayed()->add(O3array, wordSize, O3array);
3149   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
3150 
3151 }
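
// To illustrate (hypothetical numbers): an UnrollBlock describing three
// frames makes the loop run gen_new_frame three times, each iteration
// opening one new frame, advancing O3array to the next size entry in the
// delay slot, and advancing G3pcs so every frame receives its own new pc;
// the final pc is then loaded into O7 as the return address.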
3152 
3153 //------------------------------generate_deopt_blob----------------------------
3154 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3155 // instead.
3156 void SharedRuntime::generate_deopt_blob() {
3157   // allocate space for the code
3158   ResourceMark rm;
3159   // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
3161 #ifdef _LP64
3162   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
3163 #else
3164   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
3165   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
3166   CodeBuffer buffer("deopt_blob", 1600+pad, 512);
3167 #endif /* _LP64 */
3168   MacroAssembler* masm               = new MacroAssembler(&buffer);
3169   FloatRegister   Freturn0           = F0;
3170   Register        Greturn1           = G1;
3171   Register        Oreturn0           = O0;
3172   Register        Oreturn1           = O1;
3173   Register        O2UnrollBlock      = O2;
3174   Register        L0deopt_mode       = L0;
3175   Register        G4deopt_mode       = G4_scratch;
3176   int             frame_size_words;
3177   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3178 #if !defined(_LP64) && defined(COMPILER2)
3179   Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3180 #endif
3181   Label           cont;
3182 
3183   OopMapSet *oop_maps = new OopMapSet();
3184 
3185   //
3186   // This is the entry point for code which is returning to a de-optimized
3187   // frame.
3188   // The steps taken by this frame are as follows:
3189   //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3190   //     and all potentially live registers (at a pollpoint many registers can be live).
3191   //
3192   //   - call the C routine: Deoptimization::fetch_unroll_info (this function
3193   //     returns information about the number and size of interpreter frames
3194   //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
3196   //     volatile registers will now be captured in the vframeArray as needed.
3197   //   - deallocate the deoptimization frame
3198   //   - in a loop using the information returned in the previous step
3199   //     push new interpreter frames (take care to propagate the return
3200   //     values through each new frame pushed)
3201   //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3202   //   - call the C routine: Deoptimization::unpack_frames (this function
3203   //     lays out values on the interpreter frame which was just created)
3204   //   - deallocate the dummy unpack_frame
3205   //   - ensure that all the return values are correctly set and then do
3206   //     a return to the interpreter entry point
3207   //
3208   // Refer to the following methods for more information:
3209   //   - Deoptimization::fetch_unroll_info
3210   //   - Deoptimization::unpack_frames
3211 
3212   OopMap* map = NULL;
3213 
3214   int start = __ offset();
3215 
3216   // restore G2, the trampoline destroyed it
3217   __ get_thread();
3218 
3219   // On entry we have been called by the deoptimized nmethod with a call that
3220   // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places.
3222 
3223   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3224   __ ba(false, cont);
3225   __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3226 
3227   int exception_offset = __ offset() - start;
3228 
3229   // restore G2, the trampoline destroyed it
3230   __ get_thread();
3231 
3232   // On entry we have been jumped to by the exception handler (or exception_blob
3233   // for server).  O0 contains the exception oop and O7 contains the original
3234   // exception pc.  So if we push a frame here it will look to the
3235   // stack walking code (fetch_unroll_info) just like a normal call so
3236   // state will be extracted normally.
3237 
3238   // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
3240   // for where the pending exception is kept.
3241   __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3242 
3243   //
3244   // Vanilla deoptimization with an exception pending in exception_oop
3245   //
3246   int exception_in_tls_offset = __ offset() - start;
3247 
3248   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3249   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3250 
3251   // Restore G2_thread
3252   __ get_thread();
3253 
3254 #ifdef ASSERT
3255   {
3256     // verify that there is really an exception oop in exception_oop
3257     Label has_exception;
3258     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3259     __ br_notnull(Oexception, false, Assembler::pt, has_exception);
    __ delayed()->nop();
3261     __ stop("no exception in thread");
3262     __ bind(has_exception);
3263 
3264     // verify that there is no pending exception
3265     Label no_pending_exception;
3266     Address exception_addr(G2_thread, Thread::pending_exception_offset());
3267     __ ld_ptr(exception_addr, Oexception);
3268     __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3269     __ delayed()->nop();
3270     __ stop("must not have pending exception here");
3271     __ bind(no_pending_exception);
3272   }
3273 #endif
3274 
3275   __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3277 
3278   //
3279   // Reexecute entry, similar to c2 uncommon trap
3280   //
3281   int reexecute_offset = __ offset() - start;
3282 
3283   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3284   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3285 
3286   __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3287 
3288   __ bind(cont);
3289 
3290   __ set_last_Java_frame(SP, noreg);
3291 
3292   // do the call by hand so we can get the oopmap
3293 
3294   __ mov(G2_thread, L7_thread_cache);
3295   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3296   __ delayed()->mov(G2_thread, O0);
3297 
  // Set an oopmap for the call site; this describes all our saved volatile registers
3299 
3300   oop_maps->add_gc_map( __ offset()-start, map);
3301 
3302   __ mov(L7_thread_cache, G2_thread);
3303 
3304   __ reset_last_Java_frame();
3305 
3306   // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3307   // so this move will survive
3308 
3309   __ mov(L0deopt_mode, G4deopt_mode);
3310 
3311   __ mov(O0, O2UnrollBlock->after_save());
3312 
3313   RegisterSaver::restore_result_registers(masm);
3314 
3315   Label noException;
3316   __ cmp(G4deopt_mode, Deoptimization::Unpack_exception);   // Was exception pending?
3317   __ br(Assembler::notEqual, false, Assembler::pt, noException);
3318   __ delayed()->nop();
3319 
3320   // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
3322   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3323   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3324   __ bind(noException);
3325 
3326   // deallocate the deoptimization frame taking care to preserve the return values
3327   __ mov(Oreturn0,     Oreturn0->after_save());
3328   __ mov(Oreturn1,     Oreturn1->after_save());
3329   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3330   __ restore();
3331 
3332   // Allocate new interpreter frame(s) and possible c2i adapter frame
3333 
3334   make_new_frames(masm, true);
3335 
3336   // push a dummy "unpack_frame" taking care of float return values and
3337   // call Deoptimization::unpack_frames to have the unpacker layout
3338   // information in the interpreter frames just created and then return
3339   // to the interpreter entry point
3340   __ save(SP, -frame_size_words*wordSize, SP);
3341   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3342 #if !defined(_LP64)
3343 #if defined(COMPILER2)
  // In 32-bit builds C2 returns longs in the single 64-bit register G1
3345   __ stx(Greturn1, saved_Greturn1_addr);
3346 #endif
3347   __ set_last_Java_frame(SP, noreg);
3348   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3349 #else
3350   // LP64 uses g4 in set_last_Java_frame
3351   __ mov(G4deopt_mode, O1);
3352   __ set_last_Java_frame(SP, G0);
3353   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3354 #endif
3355   __ reset_last_Java_frame();
3356   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3357 
3358 #if !defined(_LP64) && defined(COMPILER2)
3359   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3360   // I0/I1 if the return value is long.
3361   Label not_long;
  __ cmp(O0, T_LONG);
3363   __ br(Assembler::notEqual, false, Assembler::pt, not_long);
3364   __ delayed()->nop();
  __ ldd(saved_Greturn1_addr, I0);
3366   __ bind(not_long);
3367 #endif
3368   __ ret();
3369   __ delayed()->restore();
3370 
3371   masm->flush();
3372   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3373   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3374 }
3375 
3376 #ifdef COMPILER2
3377 
3378 //------------------------------generate_uncommon_trap_blob--------------------
3379 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3380 // instead.
3381 void SharedRuntime::generate_uncommon_trap_blob() {
3382   // allocate space for the code
3383   ResourceMark rm;
3384   // setup code generation tools
3385   int pad = VerifyThread ? 512 : 0;
3386 #ifdef _LP64
3387   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3388 #else
3389   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3390   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3391   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3392 #endif
3393   MacroAssembler* masm               = new MacroAssembler(&buffer);
3394   Register        O2UnrollBlock      = O2;
3395   Register        O2klass_index      = O2;
3396 
3397   //
3398   // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
3402   // The steps taken by this frame are as follows:
3403   //   - push a fake "unpack_frame"
3404   //   - call the C routine Deoptimization::uncommon_trap (this function
3405   //     packs the current compiled frame into vframe arrays and returns
3406   //     information about the number and size of interpreter frames which
3407   //     are equivalent to the frame which is being deoptimized)
3408   //   - deallocate the "unpack_frame"
3409   //   - deallocate the deoptimization frame
3410   //   - in a loop using the information returned in the previous step
3411   //     push interpreter frames;
3412   //   - create a dummy "unpack_frame"
3413   //   - call the C routine: Deoptimization::unpack_frames (this function
3414   //     lays out values on the interpreter frame which was just created)
3415   //   - deallocate the dummy unpack_frame
3416   //   - return to the interpreter entry point
3417   //
3418   //  Refer to the following methods for more information:
3419   //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames
3421 
3422   // the unloaded class index is in O0 (first parameter to this blob)
3423 
3424   // push a dummy "unpack_frame"
3425   // and call Deoptimization::uncommon_trap to pack the compiled frame into
3426   // vframe array and return the UnrollBlock information
3427   __ save_frame(0);
3428   __ set_last_Java_frame(SP, noreg);
3429   __ mov(I0, O2klass_index);
3430   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3431   __ reset_last_Java_frame();
3432   __ mov(O0, O2UnrollBlock->after_save());
3433   __ restore();
3434 
3435   // deallocate the deoptimized frame taking care to preserve the return values
3436   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3437   __ restore();
3438 
3439   // Allocate new interpreter frame(s) and possible c2i adapter frame
3440 
3441   make_new_frames(masm, false);
3442 
3443   // push a dummy "unpack_frame" taking care of float return values and
3444   // call Deoptimization::unpack_frames to have the unpacker layout
3445   // information in the interpreter frames just created and then return
3446   // to the interpreter entry point
3447   __ save_frame(0);
3448   __ set_last_Java_frame(SP, noreg);
3449   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3450   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3451   __ reset_last_Java_frame();
3452   __ ret();
3453   __ delayed()->restore();
3454 
3455   masm->flush();
3456   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3457 }
3458 
3459 #endif // COMPILER2
3460 
3461 //------------------------------generate_handler_blob-------------------
3462 //
3463 // Generate a special Compile2Runtime blob that saves all registers, and sets
3464 // up an OopMap.
3465 //
3466 // This blob is jumped to (via a breakpoint and the signal handler) from a
3467 // safepoint in compiled code.  On entry to this blob, O7 contains the
3468 // address in the original nmethod at which we should resume normal execution.
3469 // Thus, this blob looks like a subroutine which must preserve lots of
3470 // registers and return normally.  Note that O7 is never register-allocated,
3471 // so it is guaranteed to be free here.
3472 //
3473 
3474 // The hardest part of what this blob must do is to save the 64-bit %o
  // registers in the 32-bit build.  A simple 'save' turns the %o's to %i's and
3476 // an interrupt will chop off their heads.  Making space in the caller's frame
3477 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3478 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3479 // SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
3480 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3481 // Tricky, tricky, tricky...
3482 
3483 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
3484   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3485 
3486   // allocate space for the code
3487   ResourceMark rm;
3488   // setup code generation tools
3489   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3490   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3491   // even larger with TraceJumps
3492   int pad = TraceJumps ? 512 : 0;
3493   CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3494   MacroAssembler* masm                = new MacroAssembler(&buffer);
3495   int             frame_size_words;
3496   OopMapSet *oop_maps = new OopMapSet();
3497   OopMap* map = NULL;
3498 
3499   int start = __ offset();
3500 
3501   // If this causes a return before the processing, then do a "restore"
3502   if (cause_return) {
3503     __ restore();
3504   } else {
3505     // Make it look like we were called via the poll
3506     // so that frame constructor always sees a valid return address
3507     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3508     __ sub(O7, frame::pc_return_offset, O7);
3509   }
3510 
3511   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3512 
3513   // setup last_Java_sp (blows G4)
3514   __ set_last_Java_frame(SP, noreg);
3515 
  // call into the runtime to handle the safepoint
3517   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3518   __ mov(G2_thread, O0);
3519   __ save_thread(L7_thread_cache);
3520   __ call(call_ptr);
3521   __ delayed()->nop();
3522 
3523   // Set an oopmap for the call site.
3524   // We need this not only for callee-saved registers, but also for volatile
3525   // registers that the compiler might be keeping live across a safepoint.
3526 
3527   oop_maps->add_gc_map( __ offset() - start, map);
3528 
3529   __ restore_thread(L7_thread_cache);
3530   // clear last_Java_sp
3531   __ reset_last_Java_frame();
3532 
3533   // Check for exceptions
3534   Label pending;
3535 
3536   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3537   __ tst(O1);
3538   __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3539   __ delayed()->nop();
3540 
3541   RegisterSaver::restore_live_registers(masm);
3542 
  // We are back to the original state on entry and ready to go.
3544 
3545   __ retl();
3546   __ delayed()->nop();
3547 
3548   // Pending exception after the safepoint
3549 
3550   __ bind(pending);
3551 
3552   RegisterSaver::restore_live_registers(masm);
3553 
  // We are back to the original state on entry.
3555 
3556   // Tail-call forward_exception_entry, with the issuing PC in O7,
3557   // so it looks like the original nmethod called forward_exception_entry.
3558   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3559   __ JMP(O0, 0);
3560   __ delayed()->nop();
3561 
3562   // -------------
3563   // make sure all code is generated
3564   masm->flush();
3565 
3566   // return exception blob
3567   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3568 }
3569 
3570 //
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3572 //
3573 // Generate a stub that calls into vm to find out the proper destination
3574 // of a java call. All the argument registers are live at this point
3575 // but since this is generic code we don't know what they are and the caller
3576 // must do any gc of the args.
3577 //
3578 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3579   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3580 
3581   // allocate space for the code
3582   ResourceMark rm;
3583   // setup code generation tools
3584   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3585   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3586   // even larger with TraceJumps
3587   int pad = TraceJumps ? 512 : 0;
3588   CodeBuffer buffer(name, 1600 + pad, 512);
3589   MacroAssembler* masm                = new MacroAssembler(&buffer);
3590   int             frame_size_words;
3591   OopMapSet *oop_maps = new OopMapSet();
3592   OopMap* map = NULL;
3593 
3594   int start = __ offset();
3595 
3596   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3597 
3598   int frame_complete = __ offset();
3599 
3600   // setup last_Java_sp (blows G4)
3601   __ set_last_Java_frame(SP, noreg);
3602 
  // call into the runtime to find the proper destination of the call
3604   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3605   __ mov(G2_thread, O0);
3606   __ save_thread(L7_thread_cache);
3607   __ call(destination, relocInfo::runtime_call_type);
3608   __ delayed()->nop();
3609 
3610   // O0 contains the address we are going to jump to assuming no exception got installed
3611 
3612   // Set an oopmap for the call site.
3613   // We need this not only for callee-saved registers, but also for volatile
3614   // registers that the compiler might be keeping live across a safepoint.
3615 
3616   oop_maps->add_gc_map( __ offset() - start, map);
3617 
3618   __ restore_thread(L7_thread_cache);
3619   // clear last_Java_sp
3620   __ reset_last_Java_frame();
3621 
3622   // Check for exceptions
3623   Label pending;
3624 
3625   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3626   __ tst(O1);
3627   __ brx(Assembler::notEqual, true, Assembler::pn, pending);
3628   __ delayed()->nop();
3629 
3630   // get the returned methodOop
3631 
3632   __ get_vm_result(G5_method);
3633   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3634 
  // O0 is where we want to jump; overwrite G3, which is saved and scratch
3636 
3637   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3638 
3639   RegisterSaver::restore_live_registers(masm);
3640 
  // We are back to the original state on entry and ready to go.
3642 
3643   __ JMP(G3, 0);
3644   __ delayed()->nop();
3645 
3646   // Pending exception after the safepoint
3647 
3648   __ bind(pending);
3649 
3650   RegisterSaver::restore_live_registers(masm);
3651 
  // We are back to the original state on entry.
3653 
3654   // Tail-call forward_exception_entry, with the issuing PC in O7,
3655   // so it looks like the original nmethod called forward_exception_entry.
3656   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3657   __ JMP(O0, 0);
3658   __ delayed()->nop();
3659 
3660   // -------------
3661   // make sure all code is generated
3662   masm->flush();
3663 
  // return the blob
  // frame_size_words is in words, as new_runtime_stub expects
3666   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3667 }