/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->
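// The '__' shorthand above forwards each generated instruction to the
// MacroAssembler instance in scope, keeping the stub generators readable.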


class RegisterSaver {

  // Used for saving volatile registers. These are the Gregs, Fregs, and
  // I/L/O regs. The Oregs are problematic. In the 32-bit build the compiler
  // can have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers, which requires that we save
  // them before the window save (as they then become %i registers and get
  // their heads chopped off on interrupt).  We have to save some %g
  // registers here as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8-byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; }
  static int G3_offset() { return g3_offset; }
  static int G5_offset() { return g5_offset; }
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
 113 
 114 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
 115   // Record volatile registers as callee-save values in an OopMap so their save locations will be
 116   // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
 117   // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
 118   // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
 119   // (as the stub's I's) when the runtime routine called by the stub creates its frame.
 120   int i;
 121   // Always make the frame size 16 byte aligned.
 122   int frame_size = round_to(additional_frame_words + register_save_size, 16);
 123   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
 124   int frame_size_in_slots = frame_size / sizeof(jint);
 125   // CodeBlob frame size is in words.
 126   *total_frame_words = frame_size / wordSize;
 127   // OopMap* map = new OopMap(*total_frame_words, 0);
 128   OopMap* map = new OopMap(frame_size_in_slots, 0);
 129 
 130 #if !defined(_LP64)
 131 
 132   // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
 133   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 134   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 135   __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
 136   __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
 137   __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
 138   __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
 139 #endif /* _LP64 */
 140 
 141   __ save(SP, -frame_size, SP);
 142 
 143 #ifndef _LP64
 144   // Reload the 64 bit Oregs. Although they are now Iregs we load them
 145   // to Oregs here to avoid interrupts cutting off their heads
 146 
 147   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 148   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 149   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
 150   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
 151   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
 152   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
 153 
 154   __ stx(O0, SP, o0_offset+STACK_BIAS);
 155   map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
 156 
 157   __ stx(O1, SP, o1_offset+STACK_BIAS);
 158 
 159   map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
 160 
 161   __ stx(O2, SP, o2_offset+STACK_BIAS);
 162   map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
 163 
 164   __ stx(O3, SP, o3_offset+STACK_BIAS);
 165   map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
 166 
 167   __ stx(O4, SP, o4_offset+STACK_BIAS);
 168   map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
 169 
 170   __ stx(O5, SP, o5_offset+STACK_BIAS);
 171   map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
 172 #endif /* _LP64 */
 173 
 174 
 175 #ifdef _LP64
 176   int debug_offset = 0;
 177 #else
 178   int debug_offset = 4;
 179 #endif
 180   // Save the G's
 181   __ stx(G1, SP, g1_offset+STACK_BIAS);
 182   map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
 183 
 184   __ stx(G3, SP, g3_offset+STACK_BIAS);
 185   map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
 186 
 187   __ stx(G4, SP, g4_offset+STACK_BIAS);
 188   map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
 189 
 190   __ stx(G5, SP, g5_offset+STACK_BIAS);
 191   map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
 192 
 193   // This is really a waste but we'll keep things as they were for now
 194   if (true) {
 195 #ifndef _LP64
 196     map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
 197     map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
 198     map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
 199     map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
 200     map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
 201     map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
 202     map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
 203     map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
 204     map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
 205     map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
 206 #endif /* _LP64 */
 207   }
 208 
 209 
 210   // Save the flags
 211   __ rdccr( G5 );
 212   __ stx(G5, SP, ccr_offset+STACK_BIAS);
 213   __ stxfsr(SP, fsr_offset+STACK_BIAS);
 214 
 215   // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
 216   int offset = d00_offset;
 217   for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
 218     FloatRegister f = as_FloatRegister(i);
 219     __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
 220     // Record as callee saved both halves of double registers (2 float registers).
 221     map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
 222     map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
 223     offset += sizeof(double);
 224   }
 225 
 226   // And we're done.
 227 
 228   return map;
 229 }


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers.
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's.
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS.

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags.

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // The 32-bit build returns longs in G1.
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // ... and save them to TLS.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d-byte vectors are not supported", size);
  return size > 8;
}

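// The trampoline materializes an arbitrary destination address into G3 and
// jumps through it; trampoline_size() reserves a conservative 40 bytes so
// the worst-case set() expansion of a 64-bit constant still fits.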
size_t SharedRuntime::trampoline_size() {
  return 40;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ set((intptr_t)destination, G3_scratch);
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

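// Describe a 64-bit value living in an integer register as a VMRegPair:
// on a 64-bit build the single register covers both halves, while on a
// 32-bit build the value spans the register and its successor.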
static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities.  Values less than VMRegImpl::stack0
// are registers; those above refer to 4-byte stack slots.  All stack slots are
// based off of the window top.  VMRegImpl::stack0 refers to the first slot
// past the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher.  Register values 0-63 (up to RegisterImpl::number_of_registers)
// are the 64-bit integer registers.  Values 64-95 are the (32-bit only) float
// registers.  Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers.  For example,
// there is an O0-low and an O0-high.  Essentially, all int register numbers
// are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's
// unused (a 32-bit value was passed).  If both are VMRegImpl::Bad(), it means
// no value was passed (used as a placeholder for the other half of longs and
// doubles in the 64-bit build).  regs[].second() is either VMRegImpl::Bad()
// or regs[].first()+1 (regs[].first() may be misaligned in the C calling
// convention).  SPARC never passes a value in regs[].second() but not
// regs[].first() (regs[].first() == VMRegImpl::Bad() && regs[].second() !=
// VMRegImpl::Bad()), nor unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.

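// For example, on a 64-bit build a long passed in %o0 is described by
//   regs[i].set2(O0->as_VMReg());   // second() == first()->next()
// while an int passed in %o1 is described by
//   regs[i].set1(O1->as_VMReg());   // second() == VMRegImpl::Bad()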

// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
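// For instance, a signature (int, long, double) on a 64-bit build maps to:
//   int    -> O0 (outgoing) / I0 (incoming)
//   long   -> O1/I1 as a set2() pair
//   double -> F0:F1 as an aligned float pair
// with no stack slots consumed until the register files are exhausted.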
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC, always put longs on the stack to keep the pressure
      // off the integer argument registers; those should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}

// Helper class, mostly to avoid passing masm everywhere and to handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to the top of the argument.
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores a long into the offset pointed to by base.
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with the entry to compiled code, if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee.
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd long arg (32-bit build)
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they
  // can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // Can be very far once the blob has been relocated.
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

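// SPARC load/store displacements are signed 13-bit immediates (-4096..4095);
// offsets that do not fit must first be materialized into Rdisp.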
RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}


// Stores a long into the offset pointed to by base.
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores a double into the offset pointed to by base.
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal the 64-bit value from misaligned Lesp loads.
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However, we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually, if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in the varargs area needed by the interpreter. Round
  // up to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Set up Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

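// Branch to L_ok if pc_reg points into [code_start, code_end); otherwise
// fall through so the caller can report the failure.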
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack, // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in words.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs,                  slot, r_1->as_FloatRegister());
#else
        // Need to marshal the 64-bit value from misaligned Lesp loads.
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert the stack slot to an SP offset.
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // Check if this call should be routed towards a specific entry point.
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // The method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions.
static VMReg int_stk_helper( int i ) {
  // Bias any stack-based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So, since c2
  // assumes that there are only out_preserve_stack_slots to bias the
  // optoregs (which impacts VMRegs), when actually referencing any
  // actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots
  // is insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).
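  // For example, i == 0..5 map to %o0..%o5, while i == 6 maps to the first
  // memory-parameter slot past the register-window save area.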
  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in.
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
    assert(regs2 == NULL, "not needed on sparc");

    // Return the number of VMReg stack slots needed for the args.
    // This value does not include abi space (like the register window
    // save area).

    // The native convention is V8 if !LP64.
    // The LP64 convention is the V9 convention, which is slightly more sane.

    // We return the number of VMReg stack slots we need to reserve for all
    // the arguments, NOT counting out_preserve_stack_slots. Since we always
    // have space for storing at least 6 registers to memory, we start with
    // that.  See int_stk_helper for a further discussion.
    int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
    // V9 convention: All things "as-if" on double-wide stack slots.
    // Hoist any int/ptr/long's in the first 6 to int regs.
    // Hoist any flt/dbl's in the first 16 dbl regs.
    int j = 0;                  // Count of actual args, not HALVES
    VMRegPair param_array_reg;  // location of the argument in the parameter array
    for (int i = 0; i < total_args_passed; i++, j++) {
      param_array_reg.set_bad();
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
      case T_SHORT:
        regs[i].set1(int_stk_helper(j));
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_OBJECT:
      case T_METADATA:
        regs[i].set2(int_stk_helper(j));
        break;
      case T_FLOAT:
        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
        //
        // "When a callee prototype exists, and does not indicate variable arguments,
        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
        // will be promoted to floating-point registers"
        //
        // By "promoted" it means that the argument is located in two places, an unused
        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
        // float register.  In most cases, there are 6 or fewer arguments of any type,
        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
        // serve as shadow slots.  Per the spec, floating-point registers %d6 to %d16
        // require slots beyond that (up to %sp+BIAS+248).
        //
        {
          // V9ism: floats go in ODD registers and stack slots
          int float_index = 1 + (j << 1);
          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
          if (j < 16) {
            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
          } else {
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_DOUBLE:
        {
          assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
          // V9ism: doubles go in EVEN/ODD regs and stack slots
          int double_index = (j << 1);
          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
          if (j < 16) {
            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
          } else {
            // V9ism: doubles go in EVEN/ODD stack slots
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_VOID:
        regs[i].set_bad();
        j--;
        break; // Do not count HALVES
      default:
        ShouldNotReachHere();
      }
      // Keep track of the deepest parameter array slot.
      if (!param_array_reg.first()->is_valid()) {
        param_array_reg = regs[i];
      }
      if (param_array_reg.first()->is_stack()) {
        int off = param_array_reg.first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (param_array_reg.second()->is_stack()) {
        int off = param_array_reg.second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }

#else // _LP64
    // V8 convention: first 6 things in O-regs, rest on stack.
    // Alignment is willy-nilly.
    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_FLOAT:
      case T_INT:
      case T_OBJECT:
      case T_METADATA:
      case T_SHORT:
        regs[i].set1(int_stk_helper(i));
        break;
      case T_DOUBLE:
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
        break;
      case T_VOID: regs[i].set_bad(); break;
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off = regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off = regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }
#endif // _LP64

1282   return round_to(max_stack_slots + 1, 2);
1283 
1284 }
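     // For illustration (a hedged example of the LP64 path above): for the C
     // signature (JNIEnv*, jclass, jlong, jdouble) the loop assigns
     //   T_ADDRESS -> O0, T_OBJECT -> O1, T_LONG -> O2 (j == 2), and
     //   T_DOUBLE  -> %d6 (j == 3, double_index == 6) with shadow slots 6/7.
     // The deepest shadow slot seen is 7, which only raises max_stack_slots if
     // it exceeds the 6-register minimum reserved up front.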
1285 
1286 
1287 // ---------------------------------------------------------------------------
1288 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1289   switch (ret_type) {
1290   case T_FLOAT:
1291     __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1292     break;
1293   case T_DOUBLE:
1294     __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1295     break;
1296   }
1297 }
1298 
1299 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1300   switch (ret_type) {
1301   case T_FLOAT:
1302     __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1303     break;
1304   case T_DOUBLE:
1305     __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1306     break;
1307   }
1308 }
1309 
1310 // Check and forward any pending exception.  Thread is stored in
1311 // L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
1312 // is no exception handler.  We merely pop this frame off and throw the
1313 // exception in the caller's frame.
1314 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1315   Label L;
1316   __ br_null(Rex_oop, false, Assembler::pt, L);
1317   __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1318   // Since this is a native call, we *know* the proper exception handler
1319   // without calling into the VM: it's the empty function.  Just pop this
1320   // frame and then jump to forward_exception_entry; O7 will contain the
1321   // native caller's return PC.
1322   AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1323   __ jump_to(exception_entry, G3_scratch);
1324   __ delayed()->restore();      // Pop this frame off.
1325   __ bind(L);
1326 }
1327 
1328 // A simple move of an integer-like type
1329 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1330   if (src.first()->is_stack()) {
1331     if (dst.first()->is_stack()) {
1332       // stack to stack
1333       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1334       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1335     } else {
1336       // stack to reg
1337       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1338     }
1339   } else if (dst.first()->is_stack()) {
1340     // reg to stack
1341     __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1342   } else {
1343     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1344   }
1345 }
1346 
1347 // On 64 bit we will store integer-like items to the stack as
1348 // 64 bit items (sparc abi) even though java would only store
1349 // 32 bits for a parameter.  On 32 bit it will simply be 32 bits,
1350 // so this routine does 32->32 on 32 bit and 32->64 on 64 bit.
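     // For example (illustrative): an int argument of -1 arrives as
     // 0xFFFFFFFF; signx widens it so the 64-bit ABI slot or register holds
     // 0xFFFFFFFFFFFFFFFF, the clean value some compilers (gcc) expect.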
1351 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1352   if (src.first()->is_stack()) {
1353     if (dst.first()->is_stack()) {
1354       // stack to stack
1355       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1356       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1357     } else {
1358       // stack to reg
1359       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1360     }
1361   } else if (dst.first()->is_stack()) {
1362     // reg to stack
1363     // Some compilers (gcc) expect a clean 32 bit value on function entry
1364     __ signx(src.first()->as_Register(), L5);
1365     __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1366   } else {
1367     // Some compilers (gcc) expect a clean 32 bit value on function entry
1368     __ signx(src.first()->as_Register(), dst.first()->as_Register());
1369   }
1370 }
1371 
1372 
1373 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1374   if (src.first()->is_stack()) {
1375     if (dst.first()->is_stack()) {
1376       // stack to stack
1377       __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1378       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1379     } else {
1380       // stack to reg
1381       __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1382     }
1383   } else if (dst.first()->is_stack()) {
1384     // reg to stack
1385     __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1386   } else {
1387     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1388   }
1389 }
1390 
1391 
1392 // An oop arg. Must pass a handle not the oop itself
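     // A JNI handle here is just the address of a stack slot (or caller slot)
     // holding the oop; a NULL oop must be passed as a NULL handle.  In rough
     // pseudo-code (a sketch of both cases below):
     //   *slot = oop; rHandle = &slot; if (oop == NULL) rHandle = NULL;
     // The conditional moves (movr/movcc) implement the NULL check branchlessly.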
1393 static void object_move(MacroAssembler* masm,
1394                         OopMap* map,
1395                         int oop_handle_offset,
1396                         int framesize_in_slots,
1397                         VMRegPair src,
1398                         VMRegPair dst,
1399                         bool is_receiver,
1400                         int* receiver_offset) {
1401 
1402   // must pass a handle. First figure out the location we use as a handle
1403 
1404   if (src.first()->is_stack()) {
1405     // Oop is already on the stack
1406     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1407     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1408     __ ld_ptr(rHandle, 0, L4);
1409 #ifdef _LP64
1410     __ movr( Assembler::rc_z, L4, G0, rHandle );
1411 #else
1412     __ tst( L4 );
1413     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1414 #endif
1415     if (dst.first()->is_stack()) {
1416       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1417     }
1418     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1419     if (is_receiver) {
1420       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1421     }
1422     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1423   } else {
1424     // Oop is in an input register; we must flush it to the stack
1425     const Register rOop = src.first()->as_Register();
1426     const Register rHandle = L5;
1427     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1428     int offset = oop_slot * VMRegImpl::stack_slot_size;
1429     __ st_ptr(rOop, SP, offset + STACK_BIAS);
1430     if (is_receiver) {
1431        *receiver_offset = offset;
1432     }
1433     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1434     __ add(SP, offset + STACK_BIAS, rHandle);
1435 #ifdef _LP64
1436     __ movr( Assembler::rc_z, rOop, G0, rHandle );
1437 #else
1438     __ tst( rOop );
1439     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1440 #endif
1441 
1442     if (dst.first()->is_stack()) {
1443       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1444     } else {
1445       __ mov(rHandle, dst.first()->as_Register());
1446     }
1447   }
1448 }
1449 
1450 // A float arg may have to do a float reg to int reg conversion
1451 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1452   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1453 
1454   if (src.first()->is_stack()) {
1455     if (dst.first()->is_stack()) {
1456       // stack to stack the easiest of the bunch
1457       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1458       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1459     } else {
1460       // stack to reg
1461       if (dst.first()->is_Register()) {
1462         __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1463       } else {
1464         __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1465       }
1466     }
1467   } else if (dst.first()->is_stack()) {
1468     // reg to stack
1469     if (src.first()->is_Register()) {
1470       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1471     } else {
1472       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1473     }
1474   } else {
1475     // reg to reg
1476     if (src.first()->is_Register()) {
1477       if (dst.first()->is_Register()) {
1478         // gpr -> gpr
1479         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1480       } else {
1481         // gpr -> fpr
1482         __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1483         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1484       }
1485     } else if (dst.first()->is_Register()) {
1486       // fpr -> gpr
1487       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1488       __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1489     } else {
1490       // fpr -> fpr
1491       // In theory these overlap but the ordering is such that this is likely a nop
1492       if ( src.first() != dst.first()) {
1493         __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1494       }
1495     }
1496   }
1497 }
1498 
1499 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1500   VMRegPair src_lo(src.first());
1501   VMRegPair src_hi(src.second());
1502   VMRegPair dst_lo(dst.first());
1503   VMRegPair dst_hi(dst.second());
1504   simple_move32(masm, src_lo, dst_lo);
1505   simple_move32(masm, src_hi, dst_hi);
1506 }
1507 
1508 // A long move
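     // A long VMRegPair can be a single 64-bit register, a register pair, a
     // register plus a stack slot, or two stack slots.  Splitting and
     // recombining halves works as sketched here (illustrative):
     //   srax(src, 32, hi)                  // arithmetic shift extracts the MSW
     //   sllx(hi, 32, t);  or3(t, lo, dst)  // recombine: (hi << 32) | lo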
1509 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1510 
1511   // Do the simple ones here; otherwise do two int moves.
1512   if (src.is_single_phys_reg() ) {
1513     if (dst.is_single_phys_reg()) {
1514       __ mov(src.first()->as_Register(), dst.first()->as_Register());
1515     } else {
1516       // split src into two separate registers
1517       // Remember hi means hi address or lsw on sparc
1518       // Move msw to lsw
1519       if (dst.second()->is_reg()) {
1520         // MSW -> MSW
1521         __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1522         // Now LSW -> LSW
1523         // this will only move lo -> lo and ignore hi
1524         VMRegPair split(dst.second());
1525         simple_move32(masm, src, split);
1526       } else {
1527         VMRegPair split(src.first(), L4->as_VMReg());
1528         // MSW -> MSW (lo ie. first word)
1529         __ srax(src.first()->as_Register(), 32, L4);
1530         split_long_move(masm, split, dst);
1531       }
1532     }
1533   } else if (dst.is_single_phys_reg()) {
1534     if (src.is_adjacent_aligned_on_stack(2)) {
1535       __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1536     } else {
1537       // dst is a single reg.
1538       // Remember lo is low address not msb for stack slots
1539       // and lo is the "real" register for registers
1540       // src is split across two 32-bit halves (registers and/or
1541       // stack slots).
1542       VMRegPair split;
1543 
1544       if (src.first()->is_reg()) {
1545         // src.lo (msw) is a reg, src.hi is stk/reg
1546         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1547         split.set_pair(dst.first(), src.first());
1548       } else {
1549         // msw is stack move to L5
1550         // lsw is stack move to dst.lo (real reg)
1551         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1552         split.set_pair(dst.first(), L5->as_VMReg());
1553       }
1554 
1555       // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1556       // msw   -> src.lo/L5,  lsw -> dst.lo
1557       split_long_move(masm, src, split);
1558 
1559       // dst.lo now holds the LSW in the correct position; shift the
1560       // MSW half into the high 32 bits and OR the two together.
1561       __ sllx(split.first()->as_Register(), 32, L5);
1562 
1563       const Register d = dst.first()->as_Register();
1564       __ or3(L5, d, d);
1565     }
1566   } else {
1567     // For LP64 we can probably do better.
1568     split_long_move(masm, src, dst);
1569   }
1570 }
1571 
1572 // A double move
1573 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1574 
1575   // The painful thing here is that like long_move a VMRegPair might be
1576   // 1: a single physical register
1577   // 2: two physical registers (v8)
1578   // 3: a physical reg [lo] and a stack slot [hi] (v8)
1579   // 4: two stack slots
1580 
1581   // Since src is always a java calling convention we know that the src pair
1582   // is always either all registers or all stack (and aligned?)
1583 
1584   // in a register [lo] and a stack slot [hi]
1585   if (src.first()->is_stack()) {
1586     if (dst.first()->is_stack()) {
1587       // stack to stack the easiest of the bunch
1588       // ought to be a way to do this with ldd/std when the alignment is ok
1589       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1590       __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1591       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1592       __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1593     } else {
1594       // stack to reg
1595       if (dst.second()->is_stack()) {
1596         // stack -> reg, stack -> stack
1597         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1598         if (dst.first()->is_Register()) {
1599           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1600         } else {
1601           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1602         }
1603         // This was missing. (very rare case)
1604         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1605       } else {
1606         // stack -> reg
1607         // Eventually optimize for alignment QQQ
1608         if (dst.first()->is_Register()) {
1609           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1610           __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1611         } else {
1612           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1613           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1614         }
1615       }
1616     }
1617   } else if (dst.first()->is_stack()) {
1618     // reg to stack
1619     if (src.first()->is_Register()) {
1620       // Eventually optimize for alignment QQQ
1621       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1622       if (src.second()->is_stack()) {
1623         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1624         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1625       } else {
1626         __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1627       }
1628     } else {
1629       // fpr to stack
1630       if (src.second()->is_stack()) {
1631         ShouldNotReachHere();
1632       } else {
1633         // Is the stack aligned?
1634         if (reg2offset(dst.first()) & 0x7) {
1635           // No, do as pairs
1636           __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1637           __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1638         } else {
1639           __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1640         }
1641       }
1642     }
1643   } else {
1644     // reg to reg
1645     if (src.first()->is_Register()) {
1646       if (dst.first()->is_Register()) {
1647         // gpr -> gpr
1648         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1649         __ mov(src.second()->as_Register(), dst.second()->as_Register());
1650       } else {
1651         // gpr -> fpr
1652         // ought to be able to do a single store
1653         __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1654         __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1655         // ought to be able to do a single load
1656         __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1657         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1658       }
1659     } else if (dst.first()->is_Register()) {
1660       // fpr -> gpr
1661       // ought to be able to do a single store
1662       __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1663       // ought to be able to do a single load
1664       // REMEMBER first() is low address not LSB
1665       __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1666       if (dst.second()->is_Register()) {
1667         __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1668       } else {
1669         __ ld(FP, -4 + STACK_BIAS, L4);
1670         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1671       }
1672     } else {
1673       // fpr -> fpr
1674       // In theory these overlap but the ordering is such that this is likely a nop
1675       if ( src.first() != dst.first()) {
1676         __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1677       }
1678     }
1679   }
1680 }
1681 
1682 // Creates an inner frame if one hasn't already been created, and
1683 // saves a copy of the thread in L7_thread_cache
1684 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1685   if (!*already_created) {
1686     __ save_frame(0);
1687     // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1688     // Don't use save_thread because it smashes G2 and we merely want to save a
1689     // copy
1690     __ mov(G2_thread, L7_thread_cache);
1691     *already_created = true;
1692   }
1693 }
1694 
1695 
1696 static void save_or_restore_arguments(MacroAssembler* masm,
1697                                       const int stack_slots,
1698                                       const int total_in_args,
1699                                       const int arg_save_area,
1700                                       OopMap* map,
1701                                       VMRegPair* in_regs,
1702                                       BasicType* in_sig_bt) {
1703   // if map is non-NULL then the code should store the values,
1704   // otherwise it should load them.
1705   if (map != NULL) {
1706     // Fill in the map
1707     for (int i = 0; i < total_in_args; i++) {
1708       if (in_sig_bt[i] == T_ARRAY) {
1709         if (in_regs[i].first()->is_stack()) {
1710           int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1711           map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1712         } else if (in_regs[i].first()->is_Register()) {
1713           map->set_oop(in_regs[i].first());
1714         } else {
1715           ShouldNotReachHere();
1716         }
1717       }
1718     }
1719   }
1720 
1721   // Save or restore double word values
1722   int handle_index = 0;
1723   for (int i = 0; i < total_in_args; i++) {
1724     int slot = handle_index + arg_save_area;
1725     int offset = slot * VMRegImpl::stack_slot_size;
1726     if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1727       const Register reg = in_regs[i].first()->as_Register();
1728       if (reg->is_global()) {
1729         handle_index += 2;
1730         assert(handle_index <= stack_slots, "overflow");
1731         if (map != NULL) {
1732           __ stx(reg, SP, offset + STACK_BIAS);
1733         } else {
1734           __ ldx(SP, offset + STACK_BIAS, reg);
1735         }
1736       }
1737     } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1738       handle_index += 2;
1739       assert(handle_index <= stack_slots, "overflow");
1740       if (map != NULL) {
1741         __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1742       } else {
1743         __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1744       }
1745     }
1746   }
1747   // Save floats
1748   for (int i = 0; i < total_in_args; i++) {
1749     int slot = handle_index + arg_save_area;
1750     int offset = slot * VMRegImpl::stack_slot_size;
1751     if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1752       handle_index++;
1753       assert(handle_index <= stack_slots, "overflow");
1754       if (map != NULL) {
1755         __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1756       } else {
1757         __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1758       }
1759     }
1760   }
1761 
1762 }
1763 
1764 
1765 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1766 // keeps a new JNI critical region from starting until a GC has been
1767 // forced.  Save down any oops in registers and describe them in an
1768 // OopMap.
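     // In rough pseudo-code, the sequence emitted below is:
     //   if (GCLocker::needs_gc()) {
     //     save the live argument registers;
     //     SharedRuntime::block_for_jni_critical(thread);  // may block for GC
     //     restore the argument registers;
     //   }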
1769 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1770                                                const int stack_slots,
1771                                                const int total_in_args,
1772                                                const int arg_save_area,
1773                                                OopMapSet* oop_maps,
1774                                                VMRegPair* in_regs,
1775                                                BasicType* in_sig_bt) {
1776   __ block_comment("check GCLocker::needs_gc");
1777   Label cont;
1778   AddressLiteral sync_state(GCLocker::needs_gc_address());
1779   __ load_bool_contents(sync_state, G3_scratch);
1780   __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1781   __ delayed()->nop();
1782 
1783   // Save down any values that are live in registers and call into the
1784   // runtime to halt for a GC
1785   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1786   save_or_restore_arguments(masm, stack_slots, total_in_args,
1787                             arg_save_area, map, in_regs, in_sig_bt);
1788 
1789   __ mov(G2_thread, L7_thread_cache);
1790 
1791   __ set_last_Java_frame(SP, noreg);
1792 
1793   __ block_comment("block_for_jni_critical");
1794   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1795   __ delayed()->mov(L7_thread_cache, O0);
1796   oop_maps->add_gc_map( __ offset(), map);
1797 
1798   __ restore_thread(L7_thread_cache); // restore G2_thread
1799   __ reset_last_Java_frame();
1800 
1801   // Reload all the register arguments
1802   save_or_restore_arguments(masm, stack_slots, total_in_args,
1803                             arg_save_area, NULL, in_regs, in_sig_bt);
1804 
1805   __ bind(cont);
1806 #ifdef ASSERT
1807   if (StressCriticalJNINatives) {
1808     // Stress register saving
1809     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1810     save_or_restore_arguments(masm, stack_slots, total_in_args,
1811                               arg_save_area, map, in_regs, in_sig_bt);
1812     // Destroy argument registers
1813     for (int i = 0; i < total_in_args; i++) {
1814       if (in_regs[i].first()->is_Register()) {
1815         const Register reg = in_regs[i].first()->as_Register();
1816         if (reg->is_global()) {
1817           __ mov(G0, reg);
1818         }
1819       } else if (in_regs[i].first()->is_FloatRegister()) {
1820         __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1821       }
1822     }
1823 
1824     save_or_restore_arguments(masm, stack_slots, total_in_args,
1825                               arg_save_area, NULL, in_regs, in_sig_bt);
1826   }
1827 #endif
1828 }
1829 
1830 // Unpack an array argument into a pointer to the body and the length
1831 // if the array is non-null, otherwise pass 0 for both.
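     // For example (illustrative): a Java byte[] argument becomes the C pair
     // (jint length, jbyte* body), where body points just past the arrayOop
     // header at the first element; a null array is passed as (0, NULL).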
1832 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1833   // Pass the length, ptr pair
1834   Label is_null, done;
1835   if (reg.first()->is_stack()) {
1836     VMRegPair tmp  = reg64_to_VMRegPair(L2);
1837     // Load the arg up from the stack
1838     move_ptr(masm, reg, tmp);
1839     reg = tmp;
1840   }
1841   __ cmp(reg.first()->as_Register(), G0);
1842   __ brx(Assembler::equal, false, Assembler::pt, is_null);
1843   __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1844   move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1845   __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1846   move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1847   __ ba_short(done);
1848   __ bind(is_null);
1849   // Pass zeros
1850   move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1851   move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1852   __ bind(done);
1853 }
1854 
1855 static void verify_oop_args(MacroAssembler* masm,
1856                             methodHandle method,
1857                             const BasicType* sig_bt,
1858                             const VMRegPair* regs) {
1859   Register temp_reg = G5_method;  // not part of any compiled calling seq
1860   if (VerifyOops) {
1861     for (int i = 0; i < method->size_of_parameters(); i++) {
1862       if (sig_bt[i] == T_OBJECT ||
1863           sig_bt[i] == T_ARRAY) {
1864         VMReg r = regs[i].first();
1865         assert(r->is_valid(), "bad oop arg");
1866         if (r->is_stack()) {
1867           RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1868           ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1869           __ ld_ptr(SP, ld_off, temp_reg);
1870           __ verify_oop(temp_reg);
1871         } else {
1872           __ verify_oop(r->as_Register());
1873         }
1874       }
1875     }
1876   }
1877 }
1878 
1879 static void gen_special_dispatch(MacroAssembler* masm,
1880                                  methodHandle method,
1881                                  const BasicType* sig_bt,
1882                                  const VMRegPair* regs) {
1883   verify_oop_args(masm, method, sig_bt, regs);
1884   vmIntrinsics::ID iid = method->intrinsic_id();
1885 
1886   // Now write the args into the outgoing interpreter space
1887   bool     has_receiver   = false;
1888   Register receiver_reg   = noreg;
1889   int      member_arg_pos = -1;
1890   Register member_reg     = noreg;
1891   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1892   if (ref_kind != 0) {
1893     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1894     member_reg = G5_method;  // known to be free at this point
1895     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1896   } else if (iid == vmIntrinsics::_invokeBasic) {
1897     has_receiver = true;
1898   } else {
1899     fatal("unexpected intrinsic id %d", iid);
1900   }
1901 
1902   if (member_reg != noreg) {
1903     // Load the member_arg into register, if necessary.
1904     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1905     VMReg r = regs[member_arg_pos].first();
1906     if (r->is_stack()) {
1907       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1908       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1909       __ ld_ptr(SP, ld_off, member_reg);
1910     } else {
1911       // no data motion is needed
1912       member_reg = r->as_Register();
1913     }
1914   }
1915 
1916   if (has_receiver) {
1917     // Make sure the receiver is loaded into a register.
1918     assert(method->size_of_parameters() > 0, "oob");
1919     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1920     VMReg r = regs[0].first();
1921     assert(r->is_valid(), "bad receiver arg");
1922     if (r->is_stack()) {
1923       // Porting note:  This assumes that compiled calling conventions always
1924       // pass the receiver oop in a register.  If this is not true on some
1925       // platform, pick a temp and load the receiver from stack.
1926       fatal("receiver always in a register");
1927       receiver_reg = G3_scratch;  // known to be free at this point
1928       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1929       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1930       __ ld_ptr(SP, ld_off, receiver_reg);
1931     } else {
1932       // no data motion is needed
1933       receiver_reg = r->as_Register();
1934     }
1935   }
1936 
1937   // Figure out which address we are really jumping to:
1938   MethodHandles::generate_method_handle_dispatch(masm, iid,
1939                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1940 }
1941 
1942 // ---------------------------------------------------------------------------
1943 // Generate a native wrapper for a given method.  The method takes arguments
1944 // in the Java compiled code convention, marshals them to the native
1945 // convention (handlizes oops, etc), transitions to native, makes the call,
1946 // returns to java state (possibly blocking), unhandlizes any result and
1947 // returns.
1948 //
1949 // Critical native functions are a shorthand for the use of
1950 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1951 // functions.  The wrapper is expected to unpack the arguments before
1952 // passing them to the callee and perform checks before and after the
1953 // native call to ensure that the GCLocker
1954 // lock_critical/unlock_critical semantics are followed.  Some other
1955 // parts of JNI setup are skipped, like the tear down of the JNI handle
1956 // block and the check for pending exceptions, because it's impossible for them
1957 // to be thrown.
1958 //
1959 // They are roughly structured like this:
1960 //    if (GCLocker::needs_gc())
1961 //      SharedRuntime::block_for_jni_critical();
1962 //    transition to thread_in_native
1963 //    unpack array arguments and call native entry point
1964 //    check for safepoint in progress
1965 //    check if any thread suspend flags are set
1966 //      call into JVM and possibly unlock the JNI critical
1967 //      if a GC was suppressed while in the critical native.
1968 //    transition back to thread_in_Java
1969 //    return to caller
1970 //
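     // For example (an assumed illustration, not taken from this file): the
     // critical counterpart of a native method taking a byte[] would be
     // declared roughly as
     //   JNIEXPORT jint JNICALL JavaCritical_pkg_Cls_m(jint len, jbyte* body);
     // i.e. array arguments arrive pre-unpacked as (length, body) pairs.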
1971 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1972                                                 const methodHandle& method,
1973                                                 int compile_id,
1974                                                 BasicType* in_sig_bt,
1975                                                 VMRegPair* in_regs,
1976                                                 BasicType ret_type) {
1977   if (method->is_method_handle_intrinsic()) {
1978     vmIntrinsics::ID iid = method->intrinsic_id();
1979     intptr_t start = (intptr_t)__ pc();
1980     int vep_offset = ((intptr_t)__ pc()) - start;
1981     gen_special_dispatch(masm,
1982                          method,
1983                          in_sig_bt,
1984                          in_regs);
1985     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1986     __ flush();
1987     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1988     return nmethod::new_native_nmethod(method,
1989                                        compile_id,
1990                                        masm->code(),
1991                                        vep_offset,
1992                                        frame_complete,
1993                                        stack_slots / VMRegImpl::slots_per_word,
1994                                        in_ByteSize(-1),
1995                                        in_ByteSize(-1),
1996                                        (OopMapSet*)NULL);
1997   }
1998   bool is_critical_native = true;
1999   address native_func = method->critical_native_function();
2000   if (native_func == NULL) {
2001     native_func = method->native_function();
2002     is_critical_native = false;
2003   }
2004   assert(native_func != NULL, "must have function");
2005 
2006   // Native nmethod wrappers never take possession of the oop arguments.
2007   // So the caller will gc the arguments. The only thing we need an
2008   // oopMap for is if the call is static
2009   //
2010   // An OopMap for lock (and class if static), and one for the VM call itself
2011   OopMapSet *oop_maps = new OopMapSet();
2012   intptr_t start = (intptr_t)__ pc();
2013 
2014   // First thing: make an ic check to see if we should even be here
2015   {
2016     Label L;
2017     const Register temp_reg = G3_scratch;
2018     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2019     __ verify_oop(O0);
2020     __ load_klass(O0, temp_reg);
2021     __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2022 
2023     __ jump_to(ic_miss, temp_reg);
2024     __ delayed()->nop();
2025     __ align(CodeEntryAlignment);
2026     __ bind(L);
2027   }
2028 
2029   int vep_offset = ((intptr_t)__ pc()) - start;
2030 
2031 #ifdef COMPILER1
2032   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2033     // Object.hashCode, System.identityHashCode can pull the hashCode from the
2034     // header word instead of doing a full VM transition once it's been computed.
2035     // Since hashCode is usually polymorphic at call sites we can't do this
2036     // optimization at the call site without a lot of work.
2037     Label slowCase;
2038     Label done;
2039     Register obj_reg              = O0;
2040     Register result               = O0;
2041     Register header               = G3_scratch;
2042     Register hash                 = G3_scratch; // overwrite header value with hash value
2043     Register mask                 = G1;         // to get hash field from header
2044 
2045     // Unlike Object.hashCode, System.identityHashCode is a static method and
2046     // gets the object as an argument instead of the receiver.
2047     if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
2048       assert(method->is_static(), "method should be static");
2049       // return 0 for null reference input
2050       __ br_null(obj_reg, false, Assembler::pn, done);
2051       __ delayed()->mov(obj_reg, hash);
2052     }
2053 
2054     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
2055     // We depend on hash_mask being at most 32 bits and avoid the use of
2056     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2057     // vm: see markOop.hpp.
2058     __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
2059     __ sethi(markOopDesc::hash_mask, mask);
2060     __ btst(markOopDesc::unlocked_value, header);
2061     __ br(Assembler::zero, false, Assembler::pn, slowCase);
2062     if (UseBiasedLocking) {
2063       // Check if biased and fall through to runtime if so
2064       __ delayed()->nop();
2065       __ btst(markOopDesc::biased_lock_bit_in_place, header);
2066       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2067     }
2068     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2069 
2070     // Check for a valid (non-zero) hash code and get its value.
2071 #ifdef _LP64
2072     __ srlx(header, markOopDesc::hash_shift, hash);
2073 #else
2074     __ srl(header, markOopDesc::hash_shift, hash);
2075 #endif
2076     __ andcc(hash, mask, hash);
2077     __ br(Assembler::equal, false, Assembler::pn, slowCase);
2078     __ delayed()->nop();
2079 
2080     // leaf return.
2081     __ bind(done);
2082     __ retl();
2083     __ delayed()->mov(hash, result);
2084     __ bind(slowCase);
2085   }
2086 #endif // COMPILER1
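       // In effect the fast path above computes (an illustrative sketch):
       //   mark = obj->mark();
       //   if (mark is unlocked and not biased) {
       //     hash = (mark >> hash_shift) & hash_mask;
       //     if (hash != 0) return hash;   // leaf return, no VM transition
       //   }
       //   // else fall through to the ordinary wrapper below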
2087 
2088 
2089   // We have received a description of where all the java args are located
2090   // on entry to the wrapper. We need to convert these args to where
2091   // the jni function will expect them. To figure out where they go
2092   // we convert the java signature to a C signature by inserting
2093   // the hidden arguments as arg[0] and possibly arg[1] (static method)
2094 
2095   const int total_in_args = method->size_of_parameters();
2096   int total_c_args = total_in_args;
2097   int total_save_slots = 6 * VMRegImpl::slots_per_word;
2098   if (!is_critical_native) {
2099     total_c_args += 1;
2100     if (method->is_static()) {
2101       total_c_args++;
2102     }
2103   } else {
2104     for (int i = 0; i < total_in_args; i++) {
2105       if (in_sig_bt[i] == T_ARRAY) {
2106         // These have to be saved and restored across the safepoint
2107         total_c_args++;
2108       }
2109     }
2110   }
2111 
2112   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2113   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2114   BasicType* in_elem_bt = NULL;
2115 
2116   int argc = 0;
2117   if (!is_critical_native) {
2118     out_sig_bt[argc++] = T_ADDRESS;
2119     if (method->is_static()) {
2120       out_sig_bt[argc++] = T_OBJECT;
2121     }
2122 
2123     for (int i = 0; i < total_in_args ; i++ ) {
2124       out_sig_bt[argc++] = in_sig_bt[i];
2125     }
2126   } else {
2127     Thread* THREAD = Thread::current();
2128     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2129     SignatureStream ss(method->signature());
2130     for (int i = 0; i < total_in_args ; i++ ) {
2131       if (in_sig_bt[i] == T_ARRAY) {
2132         // Arrays are passed as int, elem* pair
2133         out_sig_bt[argc++] = T_INT;
2134         out_sig_bt[argc++] = T_ADDRESS;
2135         Symbol* atype = ss.as_symbol(CHECK_NULL);
2136         const char* at = atype->as_C_string();
2137         if (strlen(at) == 2) {
2138           assert(at[0] == '[', "must be");
2139           switch (at[1]) {
2140             case 'B': in_elem_bt[i]  = T_BYTE; break;
2141             case 'C': in_elem_bt[i]  = T_CHAR; break;
2142             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
2143             case 'F': in_elem_bt[i]  = T_FLOAT; break;
2144             case 'I': in_elem_bt[i]  = T_INT; break;
2145             case 'J': in_elem_bt[i]  = T_LONG; break;
2146             case 'S': in_elem_bt[i]  = T_SHORT; break;
2147             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
2148             default: ShouldNotReachHere();
2149           }
2150         }
2151       } else {
2152         out_sig_bt[argc++] = in_sig_bt[i];
2153         in_elem_bt[i] = T_VOID;
2154       }
2155       if (in_sig_bt[i] != T_VOID) {
2156         assert(in_sig_bt[i] == ss.type(), "must match");
2157         ss.next();
2158       }
2159     }
2160   }
2161 
2162   // Now figure out where the args must be stored and how much stack space
2163   // they require (neglecting out_preserve_stack_slots, but counting space for
2164   // storing the 1st six register arguments). It's weird; see int_stk_helper.
2165   //
2166   int out_arg_slots;
2167   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2168 
2169   if (is_critical_native) {
2170     // Critical natives may have to call out so they need a save area
2171     // for register arguments.
2172     int double_slots = 0;
2173     int single_slots = 0;
2174     for ( int i = 0; i < total_in_args; i++) {
2175       if (in_regs[i].first()->is_Register()) {
2176         const Register reg = in_regs[i].first()->as_Register();
2177         switch (in_sig_bt[i]) {
2178           case T_ARRAY:
2179           case T_BOOLEAN:
2180           case T_BYTE:
2181           case T_SHORT:
2182           case T_CHAR:
2183           case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
2184           case T_LONG: if (reg->is_global()) double_slots++; break;
2185           default:  ShouldNotReachHere();
2186         }
2187       } else if (in_regs[i].first()->is_FloatRegister()) {
2188         switch (in_sig_bt[i]) {
2189           case T_FLOAT:  single_slots++; break;
2190           case T_DOUBLE: double_slots++; break;
2191           default:  ShouldNotReachHere();
2192         }
2193       }
2194     }
2195     total_save_slots = double_slots * 2 + single_slots;
2196   }
2197 
2198   // Compute framesize for the wrapper.  We need to handlize all oops in
2199   // registers. We must create space for them here that is disjoint from
2200   // the windowed save area because we have no control over when we might
2201   // flush the window again and overwrite values that gc has since modified.
2202   // (The live window race)
2203   //
2204   // We always just allocate 6 words for storing down these objects. This allows
2205   // us to simply record the base and use the Ireg number to decide which
2206   // slot to use. (Note that the reg number is the inbound number, not the
2207   // outbound number.)
2208   // We must shuffle args to match the native convention, and include var-args space.
2209 
2210   // Calculate the total number of stack slots we will need.
2211 
2212   // First count the abi requirement plus all of the outgoing args
2213   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2214 
2215   // Now the space for the inbound oop handle area
2216 
2217   int oop_handle_offset = round_to(stack_slots, 2);
2218   stack_slots += total_save_slots;
2219 
2220   // Now any space we need for handlizing a klass if static method
2221 
2222   int klass_slot_offset = 0;
2223   int klass_offset = -1;
2224   int lock_slot_offset = 0;
2225   bool is_static = false;
2226 
2227   if (method->is_static()) {
2228     klass_slot_offset = stack_slots;
2229     stack_slots += VMRegImpl::slots_per_word;
2230     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2231     is_static = true;
2232   }
2233 
2234   // Plus a lock if needed
2235 
2236   if (method->is_synchronized()) {
2237     lock_slot_offset = stack_slots;
2238     stack_slots += VMRegImpl::slots_per_word;
2239   }
2240 
2241   // Now a place to save the return value, or a temporary for any gpr -> fpr moves
2242   stack_slots += 2;
2243 
2244   // OK, the space we have allocated will look like:
2245   //
2246   //
2247   // FP-> |                     |
2248   //      |---------------------|
2249   //      | 2 slots for moves   |
2250   //      |---------------------|
2251   //      | lock box (if sync)  |
2252   //      |---------------------| <- lock_slot_offset
2253   //      | klass (if static)   |
2254   //      |---------------------| <- klass_slot_offset
2255   //      | oopHandle area      |
2256   //      |---------------------| <- oop_handle_offset
2257   //      | outbound memory     |
2258   //      | based arguments     |
2259   //      |                     |
2260   //      |---------------------|
2261   //      | vararg area         |
2262   //      |---------------------|
2263   //      |                     |
2264   // SP-> | out_preserved_slots |
2265   //
2266   //
2267 
2268 
2269   // Now compute the actual number of stack words we need, rounding to keep
2270   // the stack properly aligned.
2271   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2272 
2273   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2274 
2275   // Generate stack overflow check before creating frame
2276   __ generate_stack_overflow_check(stack_size);
2277 
2278   // Generate a new frame for the wrapper.
2279   __ save(SP, -stack_size, SP);
2280 
2281   int frame_complete = ((intptr_t)__ pc()) - start;
2282 
2283   __ verify_thread();
2284 
2285   if (is_critical_native) {
2286     check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
2287                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2288   }
2289 
2290   //
2291   // We immediately shuffle the arguments so that for any vm call we have to
2292   // make from here on out (sync slow path, jvmti, etc.) we will have
2293   // captured the oops from our caller and have a valid oopMap for
2294   // them.
2295 
2296   // -----------------
2297   // The Grand Shuffle
2298   //
2299   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2300   // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2301   // the class mirror instead of a receiver.  This pretty much guarantees that
2302   // register layout will not match.  We ignore these extra arguments during
2303   // the shuffle. The shuffle is described by the two calling convention
2304   // vectors we have in our possession. We simply walk the java vector to
2305   // get the source locations and the c vector to get the destinations.
2306   // Because we have a new window and the argument registers are completely
2307   // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2308   // here.
2309 
2310   // This is a trick. We double the stack slots so we can claim
2311   // the oops in the caller's frame. Since we are sure to have
2312   // more args than the caller, doubling is enough to make
2313   // sure we can capture all the incoming oop args from the
2314   // caller.
2315   //
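       // Illustrative slot math: object_move records an incoming oop in the
       // caller's frame at its caller-relative slot plus stack_slots, which
       // stays inside the doubled map as long as the caller's outgoing area is
       // no deeper than our frame.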
2316   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2317   // Record sp-based slot for receiver on stack for non-static methods
2318   int receiver_offset = -1;
2319 
2320   // We move the arguments backward because the floating point destination
2321   // will always be a register with a greater or equal register number,
2322   // or a stack slot.
2323 
2324 #ifdef ASSERT
2325   bool reg_destroyed[RegisterImpl::number_of_registers];
2326   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2327   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2328     reg_destroyed[r] = false;
2329   }
2330   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2331     freg_destroyed[f] = false;
2332   }
2333 
2334 #endif /* ASSERT */
2335 
2336   for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2337 
2338 #ifdef ASSERT
2339     if (in_regs[i].first()->is_Register()) {
2340       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2341     } else if (in_regs[i].first()->is_FloatRegister()) {
2342       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2343     }
2344     if (out_regs[c_arg].first()->is_Register()) {
2345       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2346     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2347       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2348     }
2349 #endif /* ASSERT */
2350 
2351     switch (in_sig_bt[i]) {
2352       case T_ARRAY:
2353         if (is_critical_native) {
2354           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2355           c_arg--;
2356           break;
2357         }
2358       case T_OBJECT:
2359         assert(!is_critical_native, "no oop arguments");
2360         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2361                     ((i == 0) && (!is_static)),
2362                     &receiver_offset);
2363         break;
2364       case T_VOID:
2365         break;
2366 
2367       case T_FLOAT:
2368         float_move(masm, in_regs[i], out_regs[c_arg]);
2369         break;
2370 
2371       case T_DOUBLE:
2372         assert( i + 1 < total_in_args &&
2373                 in_sig_bt[i + 1] == T_VOID &&
2374                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2375         double_move(masm, in_regs[i], out_regs[c_arg]);
2376         break;
2377 
2378       case T_LONG :
2379         long_move(masm, in_regs[i], out_regs[c_arg]);
2380         break;
2381 
2382       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2383 
2384       default:
2385         move32_64(masm, in_regs[i], out_regs[c_arg]);
2386     }
2387   }
2388 
2389   // Pre-load a static method's oop into O1.  Used both by locking code and
2390   // the normal JNI call code.
2391   if (method->is_static() && !is_critical_native) {
2392     __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2393 
2394     // Now handlize the static class mirror in O1.  It's known not-null.
2395     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2396     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2397     __ add(SP, klass_offset + STACK_BIAS, O1);
2398   }
2399 
2400 
2401   const Register L6_handle = L6;
2402 
2403   if (method->is_synchronized()) {
2404     assert(!is_critical_native, "unhandled");
2405     __ mov(O1, L6_handle);
2406   }
2407 
2408   // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2409   // except O6/O7. So if we must call out we must push a new frame. We immediately
2410   // push a new frame and flush the windows.
2411 #ifdef _LP64
2412   intptr_t thepc = (intptr_t) __ pc();
2413   {
2414     address here = __ pc();
2415     // Call the next instruction
2416     __ call(here + 8, relocInfo::none);
2417     __ delayed()->nop();
2418   }
2419 #else
2420   intptr_t thepc = __ load_pc_address(O7, 0);
2421 #endif /* _LP64 */
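     // Illustrative note: the call to 'here + 8' never leaves the straight-line
     // path; it exists only for CALL's side effect of writing its own address
     // into O7, so thepc/O7 hold the anchor PC recorded in the oopMap below.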
2422 
2423   // We use the same pc/oopMap repeatedly when we call out
2424   oop_maps->add_gc_map(thepc - start, map);
2425 
2426   // O7 now has the pc loaded that we will use when we finally call to native.
2427 
2428   // Save thread in L7; it crosses a bunch of VM calls below
2429   // Don't use save_thread because it smashes G2 and we merely
2430   // want to save a copy
2431   __ mov(G2_thread, L7_thread_cache);
2432 
2433 
2434   // If we create an inner frame, once is plenty;
2435   // when we create it we must also save G2_thread.
2436   bool inner_frame_created = false;
2437 
2438   // dtrace method entry support
2439   {
2440     SkipIfEqual skip_if(
2441       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2442     // create inner frame
2443     __ save_frame(0);
2444     __ mov(G2_thread, L7_thread_cache);
2445     __ set_metadata_constant(method(), O1);
2446     __ call_VM_leaf(L7_thread_cache,
2447          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2448          G2_thread, O1);
2449     __ restore();
2450   }
2451 
2452   // RedefineClasses() tracing support for obsolete method entry
2453   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2454     // create inner frame
2455     __ save_frame(0);
2456     __ mov(G2_thread, L7_thread_cache);
2457     __ set_metadata_constant(method(), O1);
2458     __ call_VM_leaf(L7_thread_cache,
2459          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2460          G2_thread, O1);
2461     __ restore();
2462   }
2463 
2464   // We are in the jni frame unless saved_frame is true, in which case
2465   // we are one frame deeper (the "inner" frame). If we are in the
2466   // "inner" frame the args are in the Iregs; in the jni frame
2467   // they are in the Oregs.
2468   // If we ever need to go to the VM (for locking, jvmti) then
2469   // we will always be in the "inner" frame.
2470 
2471   // Lock a synchronized method
2472   int lock_offset = -1;         // Set if locked
2473   if (method->is_synchronized()) {
2474     Register Roop = O1;
2475     const Register L3_box = L3;
2476 
2477     create_inner_frame(masm, &inner_frame_created);
2478 
2479     __ ld_ptr(I1, 0, O1);   // Load the oop out of the handle (O1 became I1 after the inner save)
2480     Label done;
2481 
2482     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2483     __ add(FP, lock_offset+STACK_BIAS, L3_box);
2484 #ifdef ASSERT
2485     if (UseBiasedLocking) {
2486       // making the box point to itself will make it clear it went unused
2487       // but also be obviously invalid
2488       __ st_ptr(L3_box, L3_box, 0);
2489     }
2490 #endif // ASSERT
2491     //
2492     // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2493     //
2494     __ compiler_lock_object(Roop, L1,    L3_box, L2);
2495     __ br(Assembler::equal, false, Assembler::pt, done);
2496     __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2497 
2498 
2499     // None of the above fast optimizations worked so we have to get into the
2500     // slow case of monitor enter.  Inline a special case of call_VM that
2501     // disallows any pending_exception.
2502     __ mov(Roop, O0);            // Need oop in O0
2503     __ mov(L3_box, O1);
2504 
2505     // Record last_Java_sp, in case the VM code releases the JVM lock.
2506 
2507     __ set_last_Java_frame(FP, I7);
2508 
2509     // do the call
2510     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2511     __ delayed()->mov(L7_thread_cache, O2);
2512 
2513     __ restore_thread(L7_thread_cache); // restore G2_thread
2514     __ reset_last_Java_frame();
2515 
2516 #ifdef ASSERT
2517     { Label L;
2518     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2519     __ br_null_short(O0, Assembler::pt, L);
2520     __ stop("no pending exception allowed on exit from IR::monitorenter");
2521     __ bind(L);
2522     }
2523 #endif
2524     __ bind(done);
2525   }
2526 
2527 
2528   // Finally just about ready to make the JNI call
2529 
2530   __ flushw();
2531   if (inner_frame_created) {
2532     __ restore();
2533   } else {
2534     // Store only what we need from this frame
2535     // QQQ I think that on non-v9 (as if we care) we don't need these saves
2536     // either, as the flush traps and the current window goes too.
2537     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2538     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2539   }
2540 
2541   // get JNIEnv* which is first argument to native
2542   if (!is_critical_native) {
2543     __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2544   }
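       // For reference, a JNI native receives the environment as its first
       // formal parameter, which is why O0 gets &thread->jni_environment; an
       // illustrative (not from this file) native signature:
       //
       //   jobject JNICALL Java_Foo_bar(JNIEnv* env, jobject recv /*, ...*/);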
2545 
2546   // Use that pc we placed in O7 a while back as the current frame anchor
2547   __ set_last_Java_frame(SP, O7);
2548 
2549   // We flushed the windows ages ago; now mark them as flushed before transitioning.
2550   __ set(JavaFrameAnchor::flushed, G3_scratch);
2551   __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2552 
2553   // Transition from _thread_in_Java to _thread_in_native.
2554   __ set(_thread_in_native, G3_scratch);
2555 
2556 #ifdef _LP64
2557   AddressLiteral dest(native_func);
2558   __ relocate(relocInfo::runtime_call_type);
2559   __ jumpl_to(dest, O7, O7);
2560 #else
2561   __ call(native_func, relocInfo::runtime_call_type);
2562 #endif
2563   __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
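       // The store of _thread_in_native rides in the call's delay slot, so the
       // thread is already marked native by the time the first native
       // instruction executes.  In pseudo-C:
       //
       //   thread->set_thread_state(_thread_in_native);  // the delay slot
       //   (*native_func)(...);                          // the call above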
2564 
2565   __ restore_thread(L7_thread_cache); // restore G2_thread
2566 
2567   // Unpack native results.  For int-types, we do any needed sign-extension
2568   // and move things into I0.  The return value there will survive any VM
2569   // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2570   // specially in the slow-path code.
2571   switch (ret_type) {
2572   case T_VOID:    break;        // Nothing to do!
2573   case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2574   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
2575   // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
2576   case T_LONG:
2577 #ifndef _LP64
2578                   __ mov(O1, I1);
2579 #endif
2580                   // Fall thru
2581   case T_OBJECT:                // Really a handle
2582   case T_ARRAY:
2583   case T_INT:
2584                   __ mov(O0, I0);
2585                   break;
2586   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2587   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2588   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2589   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2590                   // (An oop result stays a handle -- cannot de-handlize until after reclaiming the jvm lock)
2591   default:
2592     ShouldNotReachHere();
2593   }
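       // The narrowing cases above are shift-based sign/zero extension; for
       // example, T_SHORT computes, in C terms (a sketch, not from this file):
       //
       //   int32_t widen_short(int32_t x) { return (x << 16) >> 16; }
       //
       // and T_BOOLEAN uses subcc/addc so that any non-zero O0 yields I0 == 1.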
2594 
2595   Label after_transition;
2596   // must we block?
2597 
2598   // Block, if necessary, before resuming in _thread_in_Java state.
2599   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2600   { Label no_block;
2601     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2602 
2603     // Switch thread to "native transition" state before reading the synchronization state.
2604     // This additional state is necessary because reading and testing the synchronization
2605     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2606     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2607     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2608     //     Thread A is resumed to finish this native method, but doesn't block here since it
2609     //     didn't see any synchronization in progress, and escapes.
2610     __ set(_thread_in_native_trans, G3_scratch);
2611     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2612     if (os::is_MP()) {
2613       if (UseMembar) {
2614         // Force this write out before the read below
2615         __ membar(Assembler::StoreLoad);
2616       } else {
2617         // Write serialization page so VM thread can do a pseudo remote membar.
2618         // We use the current thread pointer to calculate a thread specific
2619         // offset to write to within the page. This minimizes bus traffic
2620         // due to cache line collision.
2621         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2622       }
2623     }
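         // A sketch of the two orderings chosen above: UseMembar pays for a
         // StoreLoad barrier locally, while the serialization page lets the
         // VM thread force ordering remotely (it can protect the page and
         // catch any thread whose state store has not yet completed):
         //
         //   if (UseMembar) { membar(StoreLoad);                      }
         //   else           { serialize_page[offset_for(thread)] = 0; }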
2624     __ load_contents(sync_state, G3_scratch);
2625     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2626 
2627     Label L;
2628     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2629     __ br(Assembler::notEqual, false, Assembler::pn, L);
2630     __ delayed()->ld(suspend_state, G3_scratch);
2631     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2632     __ bind(L);
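         // The combined test above is, in effect:
         //
         //   if (SafepointSynchronize::_state != _not_synchronized ||
         //       thread->suspend_flags() != 0) {
         //     ... block, below ...
         //   }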
2633 
2634     // Block.  Save any potential method result value before the operation and
2635     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2636     // lets us share the oopMap we used when we went native rather than create
2637     // a distinct one for this pc.
2638     //
2639     save_native_result(masm, ret_type, stack_slots);
2640     if (!is_critical_native) {
2641       __ call_VM_leaf(L7_thread_cache,
2642                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2643                       G2_thread);
2644     } else {
2645       __ call_VM_leaf(L7_thread_cache,
2646                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2647                       G2_thread);
2648     }
2649 
2650     // Restore any method result value
2651     restore_native_result(masm, ret_type, stack_slots);
2652 
2653     if (is_critical_native) {
2654       // The call above performed the transition to thread_in_Java so
2655       // skip the transition logic below.
2656       __ ba(after_transition);
2657       __ delayed()->nop();
2658     }
2659 
2660     __ bind(no_block);
2661   }
2662 
2663   // thread state is thread_in_native_trans. Any safepoint blocking has already
2664   // happened so we can now change state to _thread_in_Java.
2665   __ set(_thread_in_Java, G3_scratch);
2666   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2667   __ bind(after_transition);
2668 
2669   Label no_reguard;
2670   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2671   __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2672 
2673   save_native_result(masm, ret_type, stack_slots);
2674   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2675   __ delayed()->nop();
2676 
2677   __ restore_thread(L7_thread_cache); // restore G2_thread
2678   restore_native_result(masm, ret_type, stack_slots);
2679 
2680   __ bind(no_reguard);
2681 
2682   // Handle possible exception (will unlock if necessary)
2683 
2684   // The native result, if any, is live in an F-register or in I0 (and I1 for a long in a 32-bit VM)
2685 
2686   // Unlock
2687   if (method->is_synchronized()) {
2688     Label done;
2689     Register I2_ex_oop = I2;
2690     const Register L3_box = L3;
2691     // Get locked oop from the handle we passed to jni
2692     __ ld_ptr(L6_handle, 0, L4);
2693     __ add(SP, lock_offset+STACK_BIAS, L3_box);
2694     // Must save pending exception around the slow-path VM call.  Since it's a
2695     // leaf call, the pending exception (if any) can be kept in a register.
2696     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2697     // Now unlock
2698     //                       (Roop, Rmark, Rbox,   Rscratch)
2699     __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2700     __ br(Assembler::equal, false, Assembler::pt, done);
2701     __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);
2702 
2703     // save and restore any potential method result value around the unlocking
2704     // operation.  Will save in I0 (or stack for FP returns).
2705     save_native_result(masm, ret_type, stack_slots);
2706 
2707     // Must clear pending-exception before re-entering the VM.  Since this is
2708     // a leaf call, pending-exception-oop can be safely kept in a register.
2709     __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2710 
2711     // slow case of monitor exit.  Inline a special case of call_VM that
2712     // disallows any pending_exception.
2713     __ mov(L3_box, O1);
2714 
2715     // Pass in current thread pointer
2716     __ mov(G2_thread, O2);
2717 
2718     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2719     __ delayed()->mov(L4, O0);              // Need oop in O0
2720 
2721     __ restore_thread(L7_thread_cache); // restore G2_thread
2722 
2723 #ifdef ASSERT
2724     { Label L;
2725     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2726     __ br_null_short(O0, Assembler::pt, L);
2727     __ stop("no pending exception allowed on exit from IR::monitorexit");
2728     __ bind(L);
2729     }
2730 #endif
2731     restore_native_result(masm, ret_type, stack_slots);
2732     // check_forward_pending_exception jump to forward_exception if any pending
2733     // exception is set.  The forward_exception routine expects to see the
2734     // exception in pending_exception and not in a register.  Kind of clumsy,
2735     // since all folks who branch to forward_exception must have tested
2736     // pending_exception first and hence have it in a register already.
2737     __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2738     __ bind(done);
2739   }
2740 
2741   // Tell dtrace about this method exit
2742   {
2743     SkipIfEqual skip_if(
2744       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2745     save_native_result(masm, ret_type, stack_slots);
2746     __ set_metadata_constant(method(), O1);
2747     __ call_VM_leaf(L7_thread_cache,
2748        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2749        G2_thread, O1);
2750     restore_native_result(masm, ret_type, stack_slots);
2751   }
2752 
2753   // Clear "last Java frame" SP and PC.
2754   __ verify_thread(); // G2_thread must be correct
2755   __ reset_last_Java_frame();
2756 
2757   // Unpack oop result
2758   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2759       Label L;
2760       __ addcc(G0, I0, G0);
2761       __ brx(Assembler::notZero, true, Assembler::pt, L);
2762       __ delayed()->ld_ptr(I0, 0, I0);
2763       __ mov(G0, I0);
2764       __ bind(L);
2765       __ verify_oop(I0);
2766   }
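       // De-handlizing, in C terms: a JNI handle is a pointer to an oop slot,
       // and a NULL handle denotes a NULL result (a sketch):
       //
       //   oop result = (handle == NULL) ? NULL : *(oop*)handle;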
2767 
2768   if (CheckJNICalls) {
2769     // clear_pending_jni_exception_check
2770     __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
2771   }
2772 
2773   if (!is_critical_native) {
2774     // reset handle block
2775     __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2776     __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2777 
2778     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2779     check_forward_pending_exception(masm, G3_scratch);
2780   }
2781 
2782 
2783   // Return
2784 
2785 #ifndef _LP64
2786   if (ret_type == T_LONG) {
2787 
2788     // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2789     __ sllx(I0, 32, G1);          // Shift bits into high G1
2790     __ srl (I1, 0, I1);           // Zero extend I1 (harmless?)
2791     __ or3 (I1, G1, G1);          // OR 64 bits into G1
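         // i.e., in C terms: G1 = ((uint64_t)I0 << 32) | (uint32_t)I1;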
2792   }
2793 #endif
2794 
2795   __ ret();
2796   __ delayed()->restore();
2797 
2798   __ flush();
2799 
2800   nmethod *nm = nmethod::new_native_nmethod(method,
2801                                             compile_id,
2802                                             masm->code(),
2803                                             vep_offset,
2804                                             frame_complete,
2805                                             stack_slots / VMRegImpl::slots_per_word,
2806                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2807                                             in_ByteSize(lock_offset),
2808                                             oop_maps);
2809 
2810   if (is_critical_native) {
2811     nm->set_lazy_critical_native(true);
2812   }
2813   return nm;
2814 
2815 }
2816 
2817 // This function returns the adjusted size (in number of words) of a c2i
2818 // adapter activation, for use during deoptimization.
2819 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2820   assert(callee_locals >= callee_parameters,
2821           "test and remove; got more parms than locals");
2822   if (callee_locals < callee_parameters)
2823     return 0;                   // No adjustment if the diff would be negative
2824   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2825   return round_to(diff, WordsPerLong);
2826 }
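     // A worked example (hypothetical numbers): with 3 callee parameters,
     // 5 callee locals and one-word stack elements, diff = (5 - 3) * 1 = 2,
     // which round_to leaves at 2, assuming WordsPerLong == 2.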
2827 
2828 // "Top of Stack" slots that may be unused by the calling convention but must
2829 // otherwise be preserved.
2830 // On Intel these are not necessary and the value can be zero.
2831 // On Sparc this describes the words reserved for storing a register window
2832 // when an interrupt occurs.
2833 uint SharedRuntime::out_preserve_stack_slots() {
2834   return frame::register_save_words * VMRegImpl::slots_per_word;
2835 }
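     // For example, in a 64-bit build with frame::register_save_words == 16
     // (the 8 %i plus 8 %l window registers) and 2 stack slots per word,
     // this reserves 32 slots.  (Numbers illustrative of SPARC LP64.)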
2836 
2837 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
2838 //
2839 // Common out the new frame generation for deopt and uncommon trap
2840 //
2841   Register        G3pcs              = G3_scratch; // Array of new pcs (input)
2842   Register        Oreturn0           = O0;
2843   Register        Oreturn1           = O1;
2844   Register        O2UnrollBlock      = O2;
2845   Register        O3array            = O3;         // Array of frame sizes (input)
2846   Register        O4array_size       = O4;         // number of frames (input)
2847   Register        O7frame_size       = O7;         // size of this frame (scratch)
2848 
2849   __ ld_ptr(O3array, 0, O7frame_size);     // load this frame's size
2850   __ sub(G0, O7frame_size, O7frame_size);  // negate it: 'save' takes a negative delta
2851   __ save(SP, O7frame_size, SP);           // allocate the new frame
2852   __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
2853 
2854   #ifdef ASSERT
2855   // make sure that the frames are aligned properly
2856 #ifndef _LP64
2857   __ btst(wordSize*2-1, SP);
2858   __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
2859 #endif
2860   #endif
2861 
2862   // Deopt needs to pass some extra live values from frame to frame
2863 
2864   if (deopt) {
2865     __ mov(Oreturn0->after_save(), Oreturn0);
2866     __ mov(Oreturn1->after_save(), Oreturn1);
2867   }
2868 
2869   __ mov(O4array_size->after_save(), O4array_size);
2870   __ sub(O4array_size, 1, O4array_size);
2871   __ mov(O3array->after_save(), O3array);
2872   __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
2873   __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
2874 
2875   #ifdef ASSERT
2876   // trash registers to show a clear pattern in backtraces
2877   __ set(0xDEAD0000, I0);
2878   __ add(I0,  2, I1);
2879   __ add(I0,  4, I2);
2880   __ add(I0,  6, I3);
2881   __ add(I0,  8, I4);
2882   // Don't touch I5; it could hold a valuable savedSP
2883   __ set(0xDEADBEEF, L0);
2884   __ mov(L0, L1);
2885   __ mov(L0, L2);
2886   __ mov(L0, L3);
2887   __ mov(L0, L4);
2888   __ mov(L0, L5);
2889 
2890   // trash the return value as there is nothing to return yet
2891   __ set(0xDEAD0001, O7);
2892   #endif
2893 
2894   __ mov(SP, O5_savedSP);
2895 }
2896 
2897 
2898 static void make_new_frames(MacroAssembler* masm, bool deopt) {
2899   //
2900   // loop through the UnrollBlock info and create new frames
2901   //
2902   Register        G3pcs              = G3_scratch;
2903   Register        Oreturn0           = O0;
2904   Register        Oreturn1           = O1;
2905   Register        O2UnrollBlock      = O2;
2906   Register        O3array            = O3;
2907   Register        O4array_size       = O4;
2908   Label           loop;
2909 
2910 #ifdef ASSERT
2911   // Compilers generate code that bangs the stack by as much as the
2912   // interpreter would need. So this stack banging should never
2913   // trigger a fault. Verify that it does not on non-product builds.
2914   if (UseStackBanging) {
2915     // Get total frame size for interpreted frames
2916     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
2917     __ bang_stack_size(O4, O3, G3_scratch);
2918   }
2919 #endif
2920 
2921   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
2922   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
2923   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
2924 
2925   // Adjust old interpreter frame to make space for new frame's extra java locals
2926   //
2927   // We capture the original sp for the transition frame only because it is needed in
2928   // order to properly calculate interpreter_sp_adjustment. Even though in real life
2929   // every interpreter frame captures a savedSP it is only needed at the transition
2930   // (fortunately). If we had to have it correct everywhere then we would need to
2931   // be told the sp_adjustment for each frame we create. If the frame size array
2932   // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
2933   // for each frame we create and keep up the illusion everywhere.
2934   //
2935 
2936   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
2937   __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
2938   __ sub(SP, O7, SP);
2939 
2940 #ifdef ASSERT
2941   // make sure that there is at least one entry in the array
2942   __ tst(O4array_size);
2943   __ breakpoint_trap(Assembler::zero, Assembler::icc);
2944 #endif
2945 
2946   // Now push the new interpreter frames
2947   __ bind(loop);
2948 
2949   // allocate a new frame, filling the registers
2950 
2951   gen_new_frame(masm, deopt);        // allocate an interpreter frame
2952 
2953   __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
2954   __ delayed()->add(O3array, wordSize, O3array);
2955   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
2956 
2957 }
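     // In outline, the code above performs (a pseudo-C sketch of the
     // UnrollBlock walk; the pcs array carries one extra trailing entry):
     //
     //   for (int i = 0; i < number_of_frames; i++) {
     //     SP -= frame_sizes[i];  // 'save' allocates the next interpreter frame
     //     I7  = frame_pcs[i];    // and installs that frame's new pc
     //   }
     //   O7 = frame_pcs[number_of_frames];  // final pc, for the bottom frame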
2958 
2959 //------------------------------generate_deopt_blob----------------------------
2960 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
2961 // instead.
2962 void SharedRuntime::generate_deopt_blob() {
2963   // allocate space for the code
2964   ResourceMark rm;
2965   // setup code generation tools
2966   int pad = VerifyThread ? 512 : 0;  // Extra slop space for more verify code
2967 #ifdef ASSERT
2968   if (UseStackBanging) {
2969     pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
2970   }
2971 #endif
2972 #if INCLUDE_JVMCI
2973   if (EnableJVMCI) {
2974     pad += 1000; // Increase the buffer size when compiling for JVMCI
2975   }
2976 #endif
2977 #ifdef _LP64
2978   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
2979 #else
2980   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
2981   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
2982   CodeBuffer buffer("deopt_blob", 1600+pad, 512);
2983 #endif /* _LP64 */
2984   MacroAssembler* masm               = new MacroAssembler(&buffer);
2985   FloatRegister   Freturn0           = F0;
2986   Register        Greturn1           = G1;
2987   Register        Oreturn0           = O0;
2988   Register        Oreturn1           = O1;
2989   Register        O2UnrollBlock      = O2;
2990   Register        L0deopt_mode       = L0;
2991   Register        G4deopt_mode       = G4_scratch;
2992   int             frame_size_words;
2993   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
2994 #if !defined(_LP64) && defined(COMPILER2)
2995   Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
2996 #endif
2997   Label           cont;
2998 
2999   OopMapSet *oop_maps = new OopMapSet();
3000 
3001   //
3002   // This is the entry point for code which is returning to a de-optimized
3003   // frame.
3004   // The steps taken by this frame are as follows:
3005   //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
3006   //     and all potentially live registers (at a pollpoint many registers can be live).
3007   //
3008   //   - call the C routine: Deoptimization::fetch_unroll_info (this function
3009   //     returns information about the number and size of interpreter frames
3010   //     which are equivalent to the frame which is being deoptimized)
3011   //   - deallocate the unpack frame, restoring only result values. Other
3012   //     volatile registers will now be captured in the vframeArray as needed.
3013   //   - deallocate the deoptimization frame
3014   //   - in a loop using the information returned in the previous step
3015   //     push new interpreter frames (take care to propagate the return
3016   //     values through each new frame pushed)
3017   //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3018   //   - call the C routine: Deoptimization::unpack_frames (this function
3019   //     lays out values on the interpreter frame which was just created)
3020   //   - deallocate the dummy unpack_frame
3021   //   - ensure that all the return values are correctly set and then do
3022   //     a return to the interpreter entry point
3023   //
3024   // Refer to the following methods for more information:
3025   //   - Deoptimization::fetch_unroll_info
3026   //   - Deoptimization::unpack_frames
3027 
3028   OopMap* map = NULL;
3029 
3030   int start = __ offset();
3031 
3032   // restore G2, the trampoline destroyed it
3033   __ get_thread();
3034 
3035   // On entry we have been called by the deoptimized nmethod with a call that
3036   // replaced the original call (or safepoint polling location) so the deoptimizing
3037   // pc is now in O7. Return values are still in the expected places
3038 
3039   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3040   __ ba(cont);
3041   __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3042 
3043 
3044 #if INCLUDE_JVMCI
3045   Label after_fetch_unroll_info_call;
3046   int implicit_exception_uncommon_trap_offset = 0;
3047   int uncommon_trap_offset = 0;
3048 
3049   if (EnableJVMCI) {
3050     masm->block_comment("BEGIN implicit_exception_uncommon_trap");
3051     implicit_exception_uncommon_trap_offset = __ offset() - start;
3052 
3053     __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
3054     __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
3055     __ add(O7, -8, O7);   // turn the exception pc into a pretend return address (pc = O7 + 8)
3056 
3057     uncommon_trap_offset = __ offset() - start;
3058 
3059     // Save everything in sight.
3060     (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3061     __ set_last_Java_frame(SP, NULL);
3062 
3063     __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
3064     __ sub(G0, 1, L1);    // L1 = -1
3065     __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));  // clear it (-1 means none)
3066 
3067     __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
3068     __ mov(G2_thread, O0);
3069     __ mov(L0deopt_mode, O2);
3070     __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
3071     __ delayed()->nop();
3072     oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
3073     __ get_thread();
3074     __ add(O7, 8, O7);
3075     __ reset_last_Java_frame();
3076 
3077     __ ba(after_fetch_unroll_info_call);
3078     __ delayed()->nop(); // Delay slot
3079     masm->block_comment("END implicit_exception_uncommon_trap");
3080   } // EnableJVMCI
3081 #endif // INCLUDE_JVMCI
3082 
3083   int exception_offset = __ offset() - start;
3084 
3085   // restore G2, the trampoline destroyed it
3086   __ get_thread();
3087 
3088   // On entry we have been jumped to by the exception handler (or exception_blob
3089   // for server).  O0 contains the exception oop and O7 contains the original
3090   // exception pc.  So if we push a frame here it will look to the
3091   // stack walking code (fetch_unroll_info) just like a normal call so
3092   // state will be extracted normally.
3093 
3094   // save exception oop in JavaThread and fall through into the
3095   // exception_in_tls case since they are handled in same way except
3096   // for where the pending exception is kept.
3097   __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3098 
3099   //
3100   // Vanilla deoptimization with an exception pending in exception_oop
3101   //
3102   int exception_in_tls_offset = __ offset() - start;
3103 
3104   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3105   // Opens a new stack frame
3106   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3107 
3108   // Restore G2_thread
3109   __ get_thread();
3110 
3111 #ifdef ASSERT
3112   {
3113     // verify that there is really an exception oop in exception_oop
3114     Label has_exception;
3115     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3116     __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3117     __ stop("no exception in thread");
3118     __ bind(has_exception);
3119 
3120     // verify that there is no pending exception
3121     Label no_pending_exception;
3122     Address exception_addr(G2_thread, Thread::pending_exception_offset());
3123     __ ld_ptr(exception_addr, Oexception);
3124     __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3125     __ stop("must not have pending exception here");
3126     __ bind(no_pending_exception);
3127   }
3128 #endif
3129 
3130   __ ba(cont);
3131   __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3132 
3133   //
3134   // Reexecute entry, similar to c2 uncommon trap
3135   //
3136   int reexecute_offset = __ offset() - start;
3137 #if INCLUDE_JVMCI && !defined(COMPILER1)
3138   if (EnableJVMCI && UseJVMCICompiler) {
3139     // JVMCI does not use this kind of deoptimization
3140     __ should_not_reach_here();
3141   }
3142 #endif
3143   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3144   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3145 
3146   __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3147 
3148   __ bind(cont);
3149 
3150   __ set_last_Java_frame(SP, noreg);
3151 
3152   // do the call by hand so we can get the oopmap
3153 
3154   __ mov(G2_thread, L7_thread_cache);
3155   __ mov(L0deopt_mode, O1);
3156   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3157   __ delayed()->mov(G2_thread, O0);
3158 
3159   // Set an oopmap for the call site; this describes all our saved volatile registers
3160 
3161   oop_maps->add_gc_map( __ offset()-start, map);
3162 
3163   __ mov(L7_thread_cache, G2_thread);
3164 
3165   __ reset_last_Java_frame();
3166 
3167 #if INCLUDE_JVMCI
3168   if (EnableJVMCI) {
3169     __ bind(after_fetch_unroll_info_call);
3170   }
3171 #endif
3172   // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3173   // so this move will survive
3174 
3175   __ mov(L0deopt_mode, G4deopt_mode);
3176 
3177   __ mov(O0, O2UnrollBlock->after_save());
3178 
3179   RegisterSaver::restore_result_registers(masm);
3180 
3181   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
3182   Label noException;
3183   __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3184 
3185   // Move the pending exception from exception_oop to Oexception so
3186   // the pending exception will be picked up by the interpreter.
3187   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3188   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3189   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
3190   __ bind(noException);
3191 
3192   // deallocate the deoptimization frame taking care to preserve the return values
3193   __ mov(Oreturn0,     Oreturn0->after_save());
3194   __ mov(Oreturn1,     Oreturn1->after_save());
3195   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3196   __ restore();
3197 
3198   // Allocate new interpreter frame(s) and possible c2i adapter frame
3199 
3200   make_new_frames(masm, true);
3201 
3202   // push a dummy "unpack_frame" taking care of float return values and
3203   // call Deoptimization::unpack_frames to have the unpacker layout
3204   // information in the interpreter frames just created and then return
3205   // to the interpreter entry point
3206   __ save(SP, -frame_size_words*wordSize, SP);
3207   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3208 #if !defined(_LP64)
3209 #if defined(COMPILER2)
3210   // 32-bit 1-register longs return longs in G1
3211   __ stx(Greturn1, saved_Greturn1_addr);
3212 #endif
3213   __ set_last_Java_frame(SP, noreg);
3214   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3215 #else
3216   // LP64 uses g4 in set_last_Java_frame
3217   __ mov(G4deopt_mode, O1);
3218   __ set_last_Java_frame(SP, G0);
3219   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3220 #endif
3221   __ reset_last_Java_frame();
3222   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3223 
3224 #if !defined(_LP64) && defined(COMPILER2)
3225   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3226   // I0/I1 if the return value is long.
3227   Label not_long;
3228   __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3229   __ ldd(saved_Greturn1_addr, I0);
3230   __ bind(not_long);
3231 #endif
3232   __ ret();
3233   __ delayed()->restore();
3234 
3235   masm->flush();
3236   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3237   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3238 #if INCLUDE_JVMCI
3239   if (EnableJVMCI) {
3240     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3241     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3242   }
3243 #endif
3244 }
3245 
3246 #ifdef COMPILER2
3247 
3248 //------------------------------generate_uncommon_trap_blob--------------------
3249 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3250 // instead.
3251 void SharedRuntime::generate_uncommon_trap_blob() {
3252   // allocate space for the code
3253   ResourceMark rm;
3254   // setup code generation tools
3255   int pad = VerifyThread ? 512 : 0;
3256 #ifdef ASSERT
3257   if (UseStackBanging) {
3258     pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
3259   }
3260 #endif
3261 #ifdef _LP64
3262   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3263 #else
3264   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3265   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3266   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3267 #endif
3268   MacroAssembler* masm               = new MacroAssembler(&buffer);
3269   Register        O2UnrollBlock      = O2;
3270   Register        O2klass_index      = O2;
3271 
3272   //
3273   // This is the entry point for all traps the compiler takes when it thinks
3274   // it cannot handle further execution of compiled code.  The frame is
3275   // deoptimized in these cases and converted into one or more interpreter
3276   // frames for execution.
3277   // The steps taken by this frame are as follows:
3278   //   - push a fake "unpack_frame"
3279   //   - call the C routine Deoptimization::uncommon_trap (this function
3280   //     packs the current compiled frame into vframe arrays and returns
3281   //     information about the number and size of interpreter frames which
3282   //     are equivalent to the frame which is being deoptimized)
3283   //   - deallocate the "unpack_frame"
3284   //   - deallocate the deoptimization frame
3285   //   - in a loop using the information returned in the previous step
3286   //     push interpreter frames;
3287   //   - create a dummy "unpack_frame"
3288   //   - call the C routine: Deoptimization::unpack_frames (this function
3289   //     lays out values on the interpreter frame which was just created)
3290   //   - deallocate the dummy unpack_frame
3291   //   - return to the interpreter entry point
3292   //
3293   //  Refer to the following methods for more information:
3294   //   - Deoptimization::uncommon_trap
3295   //   - Deoptimization::unpack_frames
3296 
3297   // the unloaded class index is in O0 (first parameter to this blob)
3298 
3299   // push a dummy "unpack_frame"
3300   // and call Deoptimization::uncommon_trap to pack the compiled frame into
3301   // vframe array and return the UnrollBlock information
3302   __ save_frame(0);
3303   __ set_last_Java_frame(SP, noreg);
3304   __ mov(I0, O2klass_index);
3305   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
3306   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
3307   __ reset_last_Java_frame();
3308   __ mov(O0, O2UnrollBlock->after_save());
3309   __ restore();
3310 
3311   // deallocate the deoptimized frame taking care to preserve the return values
3312   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3313   __ restore();
3314 
3315 #ifdef ASSERT
3316   { Label L;
3317     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
3318     __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
3319     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3320     __ bind(L);
3321   }
3322 #endif
3323 
3324   // Allocate new interpreter frame(s) and possible c2i adapter frame
3325 
3326   make_new_frames(masm, false);
3327 
3328   // push a dummy "unpack_frame" taking care of float return values and
3329   // call Deoptimization::unpack_frames to have the unpacker layout
3330   // information in the interpreter frames just created and then return
3331   // to the interpreter entry point
3332   __ save_frame(0);
3333   __ set_last_Java_frame(SP, noreg);
3334   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3335   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3336   __ reset_last_Java_frame();
3337   __ ret();
3338   __ delayed()->restore();
3339 
3340   masm->flush();
3341   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3342 }
3343 
3344 #endif // COMPILER2
3345 
3346 //------------------------------generate_handler_blob-------------------
3347 //
3348 // Generate a special Compile2Runtime blob that saves all registers, and sets
3349 // up an OopMap.
3350 //
3351 // This blob is jumped to (via a breakpoint and the signal handler) from a
3352 // safepoint in compiled code.  On entry to this blob, O7 contains the
3353 // address in the original nmethod at which we should resume normal execution.
3354 // Thus, this blob looks like a subroutine which must preserve lots of
3355 // registers and return normally.  Note that O7 is never register-allocated,
3356 // so it is guaranteed to be free here.
3357 //
3358 
3359 // The hardest part of what this blob must do is to save the 64-bit %o
3360 // registers in the 32-bit build.  A simple 'save' turns the %o's into %i's and
3361 // an interrupt will chop off their heads.  Making space in the caller's frame
3362 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3363 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3364 // SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
3365 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3366 // Tricky, tricky, tricky...
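     // In outline (step numbering illustrative; see RegisterSaver for the
     // code that actually implements this):
     //   1. grow the caller's frame by adjusting its SP,
     //   2. store the 64-bit %o registers into that space while still whole,
     //   3. only then do the 'save' to take our own register window,
     //   4. undo the adjustment of the caller's SP (now our FP) so its
     //      OopMap still describes the frame it originally knew about.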
3367 
3368 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3369   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3370 
3371   // allocate space for the code
3372   ResourceMark rm;
3373   // setup code generation tools
3374   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3375   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3376   CodeBuffer buffer("handler_blob", 1600, 512);
3377   MacroAssembler* masm                = new MacroAssembler(&buffer);
3378   int             frame_size_words;
3379   OopMapSet *oop_maps = new OopMapSet();
3380   OopMap* map = NULL;
3381 
3382   int start = __ offset();
3383 
3384   bool cause_return = (poll_type == POLL_AT_RETURN);
3385   // If this causes a return before the processing, then do a "restore"
3386   if (cause_return) {
3387     __ restore();
3388   } else {
3389     // Make it look like we were called via the poll
3390     // so that frame constructor always sees a valid return address
3391     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3392     __ sub(O7, frame::pc_return_offset, O7);
3393   }
3394 
3395   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3396 
3397   // setup last_Java_sp (blows G4)
3398   __ set_last_Java_frame(SP, noreg);
3399 
3400   // call into the runtime to handle the safepoint poll
3401   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3402   __ mov(G2_thread, O0);
3403   __ save_thread(L7_thread_cache);
3404   __ call(call_ptr);
3405   __ delayed()->nop();
3406 
3407   // Set an oopmap for the call site.
3408   // We need this not only for callee-saved registers, but also for volatile
3409   // registers that the compiler might be keeping live across a safepoint.
3410 
3411   oop_maps->add_gc_map( __ offset() - start, map);
3412 
3413   __ restore_thread(L7_thread_cache);
3414   // clear last_Java_sp
3415   __ reset_last_Java_frame();
3416 
3417   // Check for exceptions
3418   Label pending;
3419 
3420   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3421   __ br_notnull_short(O1, Assembler::pn, pending);
3422 
3423   RegisterSaver::restore_live_registers(masm);
3424 
3425   // We are back to the original state on entry and ready to go.
3426 
3427   __ retl();
3428   __ delayed()->nop();
3429 
3430   // Pending exception after the safepoint
3431 
3432   __ bind(pending);
3433 
3434   RegisterSaver::restore_live_registers(masm);
3435 
3436   // We are back to the original state on entry.
3437 
3438   // Tail-call forward_exception_entry, with the issuing PC in O7,
3439   // so it looks like the original nmethod called forward_exception_entry.
3440   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3441   __ JMP(O0, 0);
3442   __ delayed()->nop();
3443 
3444   // -------------
3445   // make sure all code is generated
3446   masm->flush();
3447 
3448   // return the safepoint blob
3449   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3450 }
3451 
3452 //
3453 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3454 //
3455 // Generate a stub that calls into vm to find out the proper destination
3456 // of a java call. All the argument registers are live at this point
3457 // but since this is generic code we don't know what they are and the caller
3458 // must do any gc of the args.
3459 //
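     // In outline the stub performs (pseudo-C; 'destination' is, e.g.,
     // SharedRuntime::resolve_static_call_C):
     //
     //   save_live_registers();
     //   address dest = destination(thread);      // may GC, may post an exception
     //   Method* callee = thread->vm_result_2();  // resolved Method*, kept in G5
     //   restore_live_registers();
     //   goto dest;                               // tail-call the real entry point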
3460 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3461   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3462 
3463   // allocate space for the code
3464   ResourceMark rm;
3465   // setup code generation tools
3466   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3467   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3468   CodeBuffer buffer(name, 1600, 512);
3469   MacroAssembler* masm                = new MacroAssembler(&buffer);
3470   int             frame_size_words;
3471   OopMapSet *oop_maps = new OopMapSet();
3472   OopMap* map = NULL;
3473 
3474   int start = __ offset();
3475 
3476   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3477 
3478   int frame_complete = __ offset();
3479 
3480   // setup last_Java_sp (blows G4)
3481   __ set_last_Java_frame(SP, noreg);
3482 
3483   // call into the runtime to resolve the call site
3484   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3485   __ mov(G2_thread, O0);
3486   __ save_thread(L7_thread_cache);
3487   __ call(destination, relocInfo::runtime_call_type);
3488   __ delayed()->nop();
3489 
3490   // O0 contains the address we are going to jump to assuming no exception got installed
3491 
3492   // Set an oopmap for the call site.
3493   // We need this not only for callee-saved registers, but also for volatile
3494   // registers that the compiler might be keeping live across a safepoint.
3495 
3496   oop_maps->add_gc_map( __ offset() - start, map);
3497 
3498   __ restore_thread(L7_thread_cache);
3499   // clear last_Java_sp
3500   __ reset_last_Java_frame();
3501 
3502   // Check for exceptions
3503   Label pending;
3504 
3505   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3506   __ br_notnull_short(O1, Assembler::pn, pending);
3507 
3508   // get the returned Method*
3509 
3510   __ get_vm_result_2(G5_method);
3511   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3512 
3513   // O0 is where we want to jump; overwrite the saved G3 (it is scratch) so the restore below reloads it
3514 
3515   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3516 
3517   RegisterSaver::restore_live_registers(masm);
3518 
3519   // We are back to the original state on entry and ready to go.
3520 
3521   __ JMP(G3, 0);
3522   __ delayed()->nop();
3523 
3524   // Pending exception after the safepoint
3525 
3526   __ bind(pending);
3527 
3528   RegisterSaver::restore_live_registers(masm);
3529 
3530   // We are back to the original state on entry.
3531 
3532   // Tail-call forward_exception_entry, with the issuing PC in O7,
3533   // so it looks like the original nmethod called forward_exception_entry.
3534   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3535   __ JMP(O0, 0);
3536   __ delayed()->nop();
3537 
3538   // -------------
3539   // make sure all code is generated
3540   masm->flush();
3541 
3542   // return the  blob
3543   // frame_size_words or bytes??
3544   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3545 }