1 /*
   2  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/debugInfoRec.hpp"
  28 #include "code/icBuffer.hpp"
  29 #include "code/vtableStubs.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "oops/compiledICHolder.hpp"
  32 #include "prims/jvmtiRedefineClassesTrace.hpp"
  33 #include "runtime/sharedRuntime.hpp"
  34 #include "runtime/vframeArray.hpp"
  35 #include "vmreg_sparc.inline.hpp"
  36 #ifdef COMPILER1
  37 #include "c1/c1_Runtime1.hpp"
  38 #endif
  39 #ifdef COMPILER2
  40 #include "opto/runtime.hpp"
  41 #endif
  42 #ifdef SHARK
  43 #include "compiler/compileBroker.hpp"
  44 #include "shark/sharkCompiler.hpp"
  45 #endif
  46 #if INCLUDE_JVMCI
  47 #include "jvmci/jvmciJavaClasses.hpp"
  48 #endif
  49 
  50 #define __ masm->
  51 
  52 
  53 class RegisterSaver {
  54 
  55   // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  56   // The Oregs are problematic. In the 32bit build the compiler can
  57   // have O registers live with 64 bit quantities. A window save will
  58   // cut the heads off of the registers. We have to do a very extensive
  59   // stack dance to save and restore these properly.
  60 
  61   // Note that the Oregs problem only exists if we block at either a polling
  62   // page exception a compiled code safepoint that was not originally a call
  63   // or deoptimize following one of these kinds of safepoints.
  64 
  65   // Lots of registers to save.  For all builds, a window save will preserve
  66   // the %i and %l registers.  For the 32-bit longs-in-two entries and 64-bit
  67   // builds a window-save will preserve the %o registers.  In the LION build
  68   // we need to save the 64-bit %o registers which requires we save them
  69   // before the window-save (as then they become %i registers and get their
  70   // heads chopped off on interrupt).  We have to save some %g registers here
  71   // as well.
  72   enum {
  73     // This frame's save area.  Includes extra space for the native call:
  74     // vararg's layout space and the like.  Briefly holds the caller's
  75     // register save area.
  76     call_args_area = frame::register_save_words_sp_offset +
  77                      frame::memory_parameter_word_sp_offset*wordSize,
  78     // Make sure save locations are always 8 byte aligned.
  79     // can't use round_to because it doesn't produce compile time constant
  80     start_of_extra_save_area = ((call_args_area + 7) & ~7),
  81     g1_offset = start_of_extra_save_area, // g-regs needing saving
  82     g3_offset = g1_offset+8,
  83     g4_offset = g3_offset+8,
  84     g5_offset = g4_offset+8,
  85     o0_offset = g5_offset+8,
  86     o1_offset = o0_offset+8,
  87     o2_offset = o1_offset+8,
  88     o3_offset = o2_offset+8,
  89     o4_offset = o3_offset+8,
  90     o5_offset = o4_offset+8,
  91     start_of_flags_save_area = o5_offset+8,
  92     ccr_offset = start_of_flags_save_area,
  93     fsr_offset = ccr_offset + 8,
  94     d00_offset = fsr_offset+8,  // Start of float save area
  95     register_save_size = d00_offset+8*32
  96   };
  97 
  98 
  99   public:
 100 
 101   static int Oexception_offset() { return o0_offset; };
 102   static int G3_offset() { return g3_offset; };
 103   static int G5_offset() { return g5_offset; };
 104   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
 105   static void restore_live_registers(MacroAssembler* masm);
 106 
 107   // During deoptimization only the result register need to be restored
 108   // all the other values have already been extracted.
 109 
 110   static void restore_result_registers(MacroAssembler* masm);
 111 };
 112 
 113 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
 114   // Record volatile registers as callee-save values in an OopMap so their save locations will be
 115   // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
 116   // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
 117   // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
 118   // (as the stub's I's) when the runtime routine called by the stub creates its frame.
 119   int i;
 120   // Always make the frame size 16 byte aligned.
 121   int frame_size = round_to(additional_frame_words + register_save_size, 16);
 122   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
 123   int frame_size_in_slots = frame_size / sizeof(jint);
 124   // CodeBlob frame size is in words.
 125   *total_frame_words = frame_size / wordSize;
 126   // OopMap* map = new OopMap(*total_frame_words, 0);
 127   OopMap* map = new OopMap(frame_size_in_slots, 0);
 128 
 129 #if !defined(_LP64)
 130 
 131   // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
 132   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 133   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 134   __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
 135   __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
 136   __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
 137   __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
 138 #endif /* _LP64 */
 139 
 140   __ save(SP, -frame_size, SP);
 141 
 142 #ifndef _LP64
 143   // Reload the 64 bit Oregs. Although they are now Iregs we load them
 144   // to Oregs here to avoid interrupts cutting off their heads
 145 
 146   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 147   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 148   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
 149   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
 150   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
 151   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
 152 
 153   __ stx(O0, SP, o0_offset+STACK_BIAS);
 154   map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
 155 
 156   __ stx(O1, SP, o1_offset+STACK_BIAS);
 157 
 158   map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
 159 
 160   __ stx(O2, SP, o2_offset+STACK_BIAS);
 161   map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
 162 
 163   __ stx(O3, SP, o3_offset+STACK_BIAS);
 164   map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
 165 
 166   __ stx(O4, SP, o4_offset+STACK_BIAS);
 167   map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
 168 
 169   __ stx(O5, SP, o5_offset+STACK_BIAS);
 170   map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
 171 #endif /* _LP64 */
 172 
 173 
 174 #ifdef _LP64
 175   int debug_offset = 0;
 176 #else
 177   int debug_offset = 4;
 178 #endif
 179   // Save the G's
 180   __ stx(G1, SP, g1_offset+STACK_BIAS);
 181   map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
 182 
 183   __ stx(G3, SP, g3_offset+STACK_BIAS);
 184   map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
 185 
 186   __ stx(G4, SP, g4_offset+STACK_BIAS);
 187   map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
 188 
 189   __ stx(G5, SP, g5_offset+STACK_BIAS);
 190   map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
 191 
 192   // This is really a waste but we'll keep things as they were for now
 193   if (true) {
 194 #ifndef _LP64
 195     map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
 196     map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
 197     map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
 198     map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
 199     map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
 200     map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
 201     map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
 202     map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
 203     map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
 204     map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
 205 #endif /* _LP64 */
 206   }
 207 
 208 
 209   // Save the flags
 210   __ rdccr( G5 );
 211   __ stx(G5, SP, ccr_offset+STACK_BIAS);
 212   __ stxfsr(SP, fsr_offset+STACK_BIAS);
 213 
 214   // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
 215   int offset = d00_offset;
 216   for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
 217     FloatRegister f = as_FloatRegister(i);
 218     __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
 219     // Record as callee saved both halves of double registers (2 float registers).
 220     map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
 221     map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
 222     offset += sizeof(double);
 223   }
 224 
 225   // And we're done.
 226 
 227   return map;
 228 }
 229 
 230 
 231 // Pop the current frame and restore all the registers that we
 232 // saved.
 233 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 234 
 235   // Restore all the FP registers
 236   for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
 237     __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
 238   }
 239 
 240   __ ldx(SP, ccr_offset+STACK_BIAS, G1);
 241   __ wrccr (G1) ;
 242 
 243   // Restore the G's
 244   // Note that G2 (AKA GThread) must be saved and restored separately.
 245   // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
 246 
 247   __ ldx(SP, g1_offset+STACK_BIAS, G1);
 248   __ ldx(SP, g3_offset+STACK_BIAS, G3);
 249   __ ldx(SP, g4_offset+STACK_BIAS, G4);
 250   __ ldx(SP, g5_offset+STACK_BIAS, G5);
 251 
 252 
 253 #if !defined(_LP64)
 254   // Restore the 64-bit O's.
 255   __ ldx(SP, o0_offset+STACK_BIAS, O0);
 256   __ ldx(SP, o1_offset+STACK_BIAS, O1);
 257   __ ldx(SP, o2_offset+STACK_BIAS, O2);
 258   __ ldx(SP, o3_offset+STACK_BIAS, O3);
 259   __ ldx(SP, o4_offset+STACK_BIAS, O4);
 260   __ ldx(SP, o5_offset+STACK_BIAS, O5);
 261 
 262   // And temporarily place them in TLS
 263 
 264   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 265   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 266   __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
 267   __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
 268   __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
 269   __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
 270 #endif /* _LP64 */
 271 
 272   // Restore flags
 273 
 274   __ ldxfsr(SP, fsr_offset+STACK_BIAS);
 275 
 276   __ restore();
 277 
 278 #if !defined(_LP64)
 279   // Now reload the 64bit Oregs after we've restore the window.
 280   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 281   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 282   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
 283   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
 284   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
 285   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
 286 #endif /* _LP64 */
 287 
 288 }
 289 
 290 // Pop the current frame and restore the registers that might be holding
 291 // a result.
 292 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 293 
 294 #if !defined(_LP64)
 295   // 32bit build returns longs in G1
 296   __ ldx(SP, g1_offset+STACK_BIAS, G1);
 297 
 298   // Retrieve the 64-bit O's.
 299   __ ldx(SP, o0_offset+STACK_BIAS, O0);
 300   __ ldx(SP, o1_offset+STACK_BIAS, O1);
 301   // and save to TLS
 302   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 303   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 304 #endif /* _LP64 */
 305 
 306   __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
 307 
 308   __ restore();
 309 
 310 #if !defined(_LP64)
 311   // Now reload the 64bit Oregs after we've restore the window.
 312   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 313   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 314 #endif /* _LP64 */
 315 
 316 }
 317 
 318 // Is vector's size (in bytes) bigger than a size saved by default?
 319 // 8 bytes FP registers are saved by default on SPARC.
 320 bool SharedRuntime::is_wide_vector(int size) {
 321   // Note, MaxVectorSize == 8 on SPARC.
 322   assert(size <= 8, "%d bytes vectors are not supported", size);
 323   return size > 8;
 324 }
 325 
 326 // The java_calling_convention describes stack locations as ideal slots on
 327 // a frame with no abi restrictions. Since we must observe abi restrictions
 328 // (like the placement of the register window) the slots must be biased by
 329 // the following value.
 330 static int reg2offset(VMReg r) {
 331   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 332 }
 333 
 334 static VMRegPair reg64_to_VMRegPair(Register r) {
 335   VMRegPair ret;
 336   if (wordSize == 8) {
 337     ret.set2(r->as_VMReg());
 338   } else {
 339     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
 340   }
 341   return ret;
 342 }
 343 
 344 // ---------------------------------------------------------------------------
 345 // Read the array of BasicTypes from a signature, and compute where the
 346 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
 347 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 348 // refer to 4-byte stack slots.  All stack slots are based off of the window
 349 // top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
 350 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.  Register
 351 // values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
 352 // integer registers.  Values 64-95 are the (32-bit only) float registers.
 353 // Each 32-bit quantity is given its own number, so the integer registers
 354 // (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
 355 // an O0-low and an O0-high.  Essentially, all int register numbers are doubled.
 356 
 357 // Register results are passed in O0-O5, for outgoing call arguments.  To
 358 // convert to incoming arguments, convert all O's to I's.  The regs array
 359 // refer to the low and hi 32-bit words of 64-bit registers or stack slots.
 360 // If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
 361 // 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
 362 // passed (used as a placeholder for the other half of longs and doubles in
 363 // the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
 364 // regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
 365 // Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
 366 // == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
 367 // same VMRegPair.
 368 
 369 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 370 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 371 // units regardless of build.
 372 
 373 
 374 // ---------------------------------------------------------------------------
 375 // The compiled Java calling convention.  The Java convention always passes
 376 // 64-bit values in adjacent aligned locations (either registers or stack),
 377 // floats in float registers and doubles in aligned float pairs.  There is
 378 // no backing varargs store for values in registers.
 379 // In the 32-bit build, longs are passed on the stack (cannot be
 380 // passed in I's, because longs in I's get their heads chopped off at
 381 // interrupt).
 382 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 383                                            VMRegPair *regs,
 384                                            int total_args_passed,
 385                                            int is_outgoing) {
 386   assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
 387 
 388   const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
 389   const int flt_reg_max = 8;
 390 
 391   int int_reg = 0;
 392   int flt_reg = 0;
 393   int slot = 0;
 394 
 395   for (int i = 0; i < total_args_passed; i++) {
 396     switch (sig_bt[i]) {
 397     case T_INT:
 398     case T_SHORT:
 399     case T_CHAR:
 400     case T_BYTE:
 401     case T_BOOLEAN:
 402 #ifndef _LP64
 403     case T_OBJECT:
 404     case T_ARRAY:
 405     case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
 406 #endif // _LP64
 407       if (int_reg < int_reg_max) {
 408         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
 409         regs[i].set1(r->as_VMReg());
 410       } else {
 411         regs[i].set1(VMRegImpl::stack2reg(slot++));
 412       }
 413       break;
 414 
 415 #ifdef _LP64
 416     case T_LONG:
 417       assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
 418       // fall-through
 419     case T_OBJECT:
 420     case T_ARRAY:
 421     case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
 422       if (int_reg < int_reg_max) {
 423         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
 424         regs[i].set2(r->as_VMReg());
 425       } else {
 426         slot = round_to(slot, 2);  // align
 427         regs[i].set2(VMRegImpl::stack2reg(slot));
 428         slot += 2;
 429       }
 430       break;
 431 #else
 432     case T_LONG:
 433       assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
 434       // On 32-bit SPARC put longs always on the stack to keep the pressure off
 435       // integer argument registers.  They should be used for oops.
 436       slot = round_to(slot, 2);  // align
 437       regs[i].set2(VMRegImpl::stack2reg(slot));
 438       slot += 2;
 439 #endif
 440       break;
 441 
 442     case T_FLOAT:
 443       if (flt_reg < flt_reg_max) {
 444         FloatRegister r = as_FloatRegister(flt_reg++);
 445         regs[i].set1(r->as_VMReg());
 446       } else {
 447         regs[i].set1(VMRegImpl::stack2reg(slot++));
 448       }
 449       break;
 450 
 451     case T_DOUBLE:
 452       assert(sig_bt[i+1] == T_VOID, "expecting half");
 453       if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
 454         flt_reg = round_to(flt_reg, 2);  // align
 455         FloatRegister r = as_FloatRegister(flt_reg);
 456         regs[i].set2(r->as_VMReg());
 457         flt_reg += 2;
 458       } else {
 459         slot = round_to(slot, 2);  // align
 460         regs[i].set2(VMRegImpl::stack2reg(slot));
 461         slot += 2;
 462       }
 463       break;
 464 
 465     case T_VOID:
 466       regs[i].set_bad();   // Halves of longs & doubles
 467       break;
 468 
 469     default:
 470       fatal("unknown basic type %d", sig_bt[i]);
 471       break;
 472     }
 473   }
 474 
 475   // retun the amount of stack space these arguments will need.
 476   return slot;
 477 }
 478 
 479 // Helper class mostly to avoid passing masm everywhere, and handle
 480 // store displacement overflow logic.
 481 class AdapterGenerator {
 482   MacroAssembler *masm;
 483   Register Rdisp;
 484   void set_Rdisp(Register r)  { Rdisp = r; }
 485 
 486   void patch_callers_callsite();
 487 
 488   // base+st_off points to top of argument
 489   int arg_offset(const int st_off) { return st_off; }
 490   int next_arg_offset(const int st_off) {
 491     return st_off - Interpreter::stackElementSize;
 492   }
 493 
 494   // Argument slot values may be loaded first into a register because
 495   // they might not fit into displacement.
 496   RegisterOrConstant arg_slot(const int st_off);
 497   RegisterOrConstant next_arg_slot(const int st_off);
 498 
 499   // Stores long into offset pointed to by base
 500   void store_c2i_long(Register r, Register base,
 501                       const int st_off, bool is_stack);
 502   void store_c2i_object(Register r, Register base,
 503                         const int st_off);
 504   void store_c2i_int(Register r, Register base,
 505                      const int st_off);
 506   void store_c2i_double(VMReg r_2,
 507                         VMReg r_1, Register base, const int st_off);
 508   void store_c2i_float(FloatRegister f, Register base,
 509                        const int st_off);
 510 
 511  public:
 512   void gen_c2i_adapter(int total_args_passed,
 513                               // VMReg max_arg,
 514                               int comp_args_on_stack, // VMRegStackSlots
 515                               const BasicType *sig_bt,
 516                               const VMRegPair *regs,
 517                               Label& skip_fixup);
 518   void gen_i2c_adapter(int total_args_passed,
 519                        // VMReg max_arg,
 520                        int comp_args_on_stack, // VMRegStackSlots
 521                        const BasicType *sig_bt,
 522                        const VMRegPair *regs);
 523 
 524   AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
 525 };
 526 
 527 
 528 // Patch the callers callsite with entry to compiled code if it exists.
 529 void AdapterGenerator::patch_callers_callsite() {
 530   Label L;
 531   __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
 532   __ br_null(G3_scratch, false, Assembler::pt, L);
 533   __ delayed()->nop();
 534   // Call into the VM to patch the caller, then jump to compiled callee
 535   __ save_frame(4);     // Args in compiled layout; do not blow them
 536 
 537   // Must save all the live Gregs the list is:
 538   // G1: 1st Long arg (32bit build)
 539   // G2: global allocated to TLS
 540   // G3: used in inline cache check (scratch)
 541   // G4: 2nd Long arg (32bit build);
 542   // G5: used in inline cache check (Method*)
 543 
 544   // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
 545 
 546 #ifdef _LP64
 547   // mov(s,d)
 548   __ mov(G1, L1);
 549   __ mov(G4, L4);
 550   __ mov(G5_method, L5);
 551   __ mov(G5_method, O0);         // VM needs target method
 552   __ mov(I7, O1);                // VM needs caller's callsite
 553   // Must be a leaf call...
 554   // can be very far once the blob has been relocated
 555   AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
 556   __ relocate(relocInfo::runtime_call_type);
 557   __ jumpl_to(dest, O7, O7);
 558   __ delayed()->mov(G2_thread, L7_thread_cache);
 559   __ mov(L7_thread_cache, G2_thread);
 560   __ mov(L1, G1);
 561   __ mov(L4, G4);
 562   __ mov(L5, G5_method);
 563 #else
 564   __ stx(G1, FP, -8 + STACK_BIAS);
 565   __ stx(G4, FP, -16 + STACK_BIAS);
 566   __ mov(G5_method, L5);
 567   __ mov(G5_method, O0);         // VM needs target method
 568   __ mov(I7, O1);                // VM needs caller's callsite
 569   // Must be a leaf call...
 570   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
 571   __ delayed()->mov(G2_thread, L7_thread_cache);
 572   __ mov(L7_thread_cache, G2_thread);
 573   __ ldx(FP, -8 + STACK_BIAS, G1);
 574   __ ldx(FP, -16 + STACK_BIAS, G4);
 575   __ mov(L5, G5_method);
 576 #endif /* _LP64 */
 577 
 578   __ restore();      // Restore args
 579   __ bind(L);
 580 }
 581 
 582 
 583 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
 584   RegisterOrConstant roc(arg_offset(st_off));
 585   return __ ensure_simm13_or_reg(roc, Rdisp);
 586 }
 587 
 588 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
 589   RegisterOrConstant roc(next_arg_offset(st_off));
 590   return __ ensure_simm13_or_reg(roc, Rdisp);
 591 }
 592 
 593 
 594 // Stores long into offset pointed to by base
 595 void AdapterGenerator::store_c2i_long(Register r, Register base,
 596                                       const int st_off, bool is_stack) {
 597 #ifdef _LP64
 598   // In V9, longs are given 2 64-bit slots in the interpreter, but the
 599   // data is passed in only 1 slot.
 600   __ stx(r, base, next_arg_slot(st_off));
 601 #else
 602 #ifdef COMPILER2
 603   // Misaligned store of 64-bit data
 604   __ stw(r, base, arg_slot(st_off));    // lo bits
 605   __ srlx(r, 32, r);
 606   __ stw(r, base, next_arg_slot(st_off));  // hi bits
 607 #else
 608   if (is_stack) {
 609     // Misaligned store of 64-bit data
 610     __ stw(r, base, arg_slot(st_off));    // lo bits
 611     __ srlx(r, 32, r);
 612     __ stw(r, base, next_arg_slot(st_off));  // hi bits
 613   } else {
 614     __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
 615     __ stw(r             , base, next_arg_slot(st_off)); // hi bits
 616   }
 617 #endif // COMPILER2
 618 #endif // _LP64
 619 }
 620 
 621 void AdapterGenerator::store_c2i_object(Register r, Register base,
 622                       const int st_off) {
 623   __ st_ptr (r, base, arg_slot(st_off));
 624 }
 625 
 626 void AdapterGenerator::store_c2i_int(Register r, Register base,
 627                    const int st_off) {
 628   __ st (r, base, arg_slot(st_off));
 629 }
 630 
 631 // Stores into offset pointed to by base
 632 void AdapterGenerator::store_c2i_double(VMReg r_2,
 633                       VMReg r_1, Register base, const int st_off) {
 634 #ifdef _LP64
 635   // In V9, doubles are given 2 64-bit slots in the interpreter, but the
 636   // data is passed in only 1 slot.
 637   __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
 638 #else
 639   // Need to marshal 64-bit value from misaligned Lesp loads
 640   __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
 641   __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
 642 #endif
 643 }
 644 
 645 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
 646                                        const int st_off) {
 647   __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
 648 }
 649 
 650 void AdapterGenerator::gen_c2i_adapter(
 651                             int total_args_passed,
 652                             // VMReg max_arg,
 653                             int comp_args_on_stack, // VMRegStackSlots
 654                             const BasicType *sig_bt,
 655                             const VMRegPair *regs,
 656                             Label& L_skip_fixup) {
 657 
 658   // Before we get into the guts of the C2I adapter, see if we should be here
 659   // at all.  We've come from compiled code and are attempting to jump to the
 660   // interpreter, which means the caller made a static call to get here
 661   // (vcalls always get a compiled target if there is one).  Check for a
 662   // compiled target.  If there is one, we need to patch the caller's call.
 663   // However we will run interpreted if we come thru here. The next pass
 664   // thru the call site will run compiled. If we ran compiled here then
 665   // we can (theorectically) do endless i2c->c2i->i2c transitions during
 666   // deopt/uncommon trap cycles. If we always go interpreted here then
 667   // we can have at most one and don't need to play any tricks to keep
 668   // from endlessly growing the stack.
 669   //
 670   // Actually if we detected that we had an i2c->c2i transition here we
 671   // ought to be able to reset the world back to the state of the interpreted
 672   // call and not bother building another interpreter arg area. We don't
 673   // do that at this point.
 674 
 675   patch_callers_callsite();
 676 
 677   __ bind(L_skip_fixup);
 678 
 679   // Since all args are passed on the stack, total_args_passed*wordSize is the
 680   // space we need.  Add in varargs area needed by the interpreter. Round up
 681   // to stack alignment.
 682   const int arg_size = total_args_passed * Interpreter::stackElementSize;
 683   const int varargs_area =
 684                  (frame::varargs_offset - frame::register_save_words)*wordSize;
 685   const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
 686 
 687   const int bias = STACK_BIAS;
 688   const int interp_arg_offset = frame::varargs_offset*wordSize +
 689                         (total_args_passed-1)*Interpreter::stackElementSize;
 690 
 691   const Register base = SP;
 692 
 693   // Make some extra space on the stack.
 694   __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
 695   set_Rdisp(G3_scratch);
 696 
 697   // Write the args into the outgoing interpreter space.
 698   for (int i = 0; i < total_args_passed; i++) {
 699     const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
 700     VMReg r_1 = regs[i].first();
 701     VMReg r_2 = regs[i].second();
 702     if (!r_1->is_valid()) {
 703       assert(!r_2->is_valid(), "");
 704       continue;
 705     }
 706     if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
 707       RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
 708       ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
 709       r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
 710       if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
 711       else                  __ ldx(base, ld_off, G1_scratch);
 712     }
 713 
 714     if (r_1->is_Register()) {
 715       Register r = r_1->as_Register()->after_restore();
 716       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
 717         store_c2i_object(r, base, st_off);
 718       } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 719         store_c2i_long(r, base, st_off, r_2->is_stack());
 720       } else {
 721         store_c2i_int(r, base, st_off);
 722       }
 723     } else {
 724       assert(r_1->is_FloatRegister(), "");
 725       if (sig_bt[i] == T_FLOAT) {
 726         store_c2i_float(r_1->as_FloatRegister(), base, st_off);
 727       } else {
 728         assert(sig_bt[i] == T_DOUBLE, "wrong type");
 729         store_c2i_double(r_2, r_1, base, st_off);
 730       }
 731     }
 732   }
 733 
 734   // Load the interpreter entry point.
 735   __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 736 
 737   // Pass O5_savedSP as an argument to the interpreter.
 738   // The interpreter will restore SP to this value before returning.
 739   __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);
 740 
 741   __ mov((frame::varargs_offset)*wordSize -
 742          1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
 743   // Jump to the interpreter just as if interpreter was doing it.
 744   __ jmpl(G3_scratch, 0, G0);
 745   // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
 746   // (really L0) is in use by the compiled frame as a generic temp.  However,
 747   // the interpreter does not know where its args are without some kind of
 748   // arg pointer being passed in.  Pass it in Gargs.
 749   __ delayed()->add(SP, G1, Gargs);
 750 }
 751 
 752 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
 753                         address code_start, address code_end,
 754                         Label& L_ok) {
 755   Label L_fail;
 756   __ set(ExternalAddress(code_start), temp_reg);
 757   __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
 758   __ cmp(pc_reg, temp_reg);
 759   __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
 760   __ delayed()->add(temp_reg, temp2_reg, temp_reg);
 761   __ cmp(pc_reg, temp_reg);
 762   __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
 763   __ bind(L_fail);
 764 }
 765 
 766 void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
 767                                        // VMReg max_arg,
 768                                        int comp_args_on_stack, // VMRegStackSlots
 769                                        const BasicType *sig_bt,
 770                                        const VMRegPair *regs) {
 771   // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
 772   // layout.  Lesp was saved by the calling I-frame and will be restored on
 773   // return.  Meanwhile, outgoing arg space is all owned by the callee
 774   // C-frame, so we can mangle it at will.  After adjusting the frame size,
 775   // hoist register arguments and repack other args according to the compiled
 776   // code convention.  Finally, end in a jump to the compiled code.  The entry
 777   // point address is the start of the buffer.
 778 
 779   // We will only enter here from an interpreted frame and never from after
 780   // passing thru a c2i. Azul allowed this but we do not. If we lose the
 781   // race and use a c2i we will remain interpreted for the race loser(s).
 782   // This removes all sorts of headaches on the x86 side and also eliminates
 783   // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
 784 
 785   // More detail:
 786   // Adapters can be frameless because they do not require the caller
 787   // to perform additional cleanup work, such as correcting the stack pointer.
 788   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 789   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 790   // even if a callee has modified the stack pointer.
 791   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 792   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 793   // up via the senderSP register).
 794   // In other words, if *either* the caller or callee is interpreted, we can
 795   // get the stack pointer repaired after a call.
 796   // This is why c2i and i2c adapters cannot be indefinitely composed.
 797   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 798   // both caller and callee would be compiled methods, and neither would
 799   // clean up the stack pointer changes performed by the two adapters.
 800   // If this happens, control eventually transfers back to the compiled
 801   // caller, but with an uncorrected stack, causing delayed havoc.
 802 
 803   if (VerifyAdapterCalls &&
 804       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 805     // So, let's test for cascading c2i/i2c adapters right now.
 806     //  assert(Interpreter::contains($return_addr) ||
 807     //         StubRoutines::contains($return_addr),
 808     //         "i2c adapter must return to an interpreter frame");
 809     __ block_comment("verify_i2c { ");
 810     Label L_ok;
 811     if (Interpreter::code() != NULL)
 812       range_check(masm, O7, O0, O1,
 813                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 814                   L_ok);
 815     if (StubRoutines::code1() != NULL)
 816       range_check(masm, O7, O0, O1,
 817                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 818                   L_ok);
 819     if (StubRoutines::code2() != NULL)
 820       range_check(masm, O7, O0, O1,
 821                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 822                   L_ok);
 823     const char* msg = "i2c adapter must return to an interpreter frame";
 824     __ block_comment(msg);
 825     __ stop(msg);
 826     __ bind(L_ok);
 827     __ block_comment("} verify_i2ce ");
 828   }
 829 
 830   // As you can see from the list of inputs & outputs there are not a lot
 831   // of temp registers to work with: mostly G1, G3 & G4.
 832 
 833   // Inputs:
 834   // G2_thread      - TLS
 835   // G5_method      - Method oop
 836   // G4 (Gargs)     - Pointer to interpreter's args
 837   // O0..O4         - free for scratch
 838   // O5_savedSP     - Caller's saved SP, to be restored if needed
 839   // O6             - Current SP!
 840   // O7             - Valid return address
 841   // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
 842 
 843   // Outputs:
 844   // G2_thread      - TLS
 845   // O0-O5          - Outgoing args in compiled layout
 846   // O6             - Adjusted or restored SP
 847   // O7             - Valid return address
 848   // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
 849   // F0-F7          - more outgoing args
 850 
 851 
 852   // Gargs is the incoming argument base, and also an outgoing argument.
 853   __ sub(Gargs, BytesPerWord, Gargs);
 854 
 855   // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
 856   // WITH O7 HOLDING A VALID RETURN PC
 857   //
 858   // |              |
 859   // :  java stack  :
 860   // |              |
 861   // +--------------+ <--- start of outgoing args
 862   // |   receiver   |   |
 863   // : rest of args :   |---size is java-arg-words
 864   // |              |   |
 865   // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
 866   // |              |   |
 867   // :    unused    :   |---Space for max Java stack, plus stack alignment
 868   // |              |   |
 869   // +--------------+ <--- SP + 16*wordsize
 870   // |              |
 871   // :    window    :
 872   // |              |
 873   // +--------------+ <--- SP
 874 
 875   // WE REPACK THE STACK.  We use the common calling convention layout as
 876   // discovered by calling SharedRuntime::calling_convention.  We assume it
 877   // causes an arbitrary shuffle of memory, which may require some register
 878   // temps to do the shuffle.  We hope for (and optimize for) the case where
 879   // temps are not needed.  We may have to resize the stack slightly, in case
 880   // we need alignment padding (32-bit interpreter can pass longs & doubles
 881   // misaligned, but the compilers expect them aligned).
 882   //
 883   // |              |
 884   // :  java stack  :
 885   // |              |
 886   // +--------------+ <--- start of outgoing args
 887   // |  pad, align  |   |
 888   // +--------------+   |
 889   // | ints, longs, |   |
 890   // |    floats,   |   |---Outgoing stack args.
 891   // :    doubles   :   |   First few args in registers.
 892   // |              |   |
 893   // +--------------+ <--- SP' + 16*wordsize
 894   // |              |
 895   // :    window    :
 896   // |              |
 897   // +--------------+ <--- SP'
 898 
 899   // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
 900   // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
 901   // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
 902 
 903   // Cut-out for having no stack args.  Since up to 6 args are passed
 904   // in registers, we will commonly have no stack args.
 905   if (comp_args_on_stack > 0) {
 906     // Convert VMReg stack slots to words.
 907     int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 908     // Round up to miminum stack alignment, in wordSize
 909     comp_words_on_stack = round_to(comp_words_on_stack, 2);
 910     // Now compute the distance from Lesp to SP.  This calculation does not
 911     // include the space for total_args_passed because Lesp has not yet popped
 912     // the arguments.
 913     __ sub(SP, (comp_words_on_stack)*wordSize, SP);
 914   }
 915 
 916   // Now generate the shuffle code.  Pick up all register args and move the
 917   // rest through G1_scratch.
 918   for (int i = 0; i < total_args_passed; i++) {
 919     if (sig_bt[i] == T_VOID) {
 920       // Longs and doubles are passed in native word order, but misaligned
 921       // in the 32-bit build.
 922       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 923       continue;
 924     }
 925 
 926     // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
 927     // 32-bit build and aligned in the 64-bit build.  Look for the obvious
 928     // ldx/lddf optimizations.
 929 
 930     // Load in argument order going down.
 931     const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
 932     set_Rdisp(G1_scratch);
 933 
 934     VMReg r_1 = regs[i].first();
 935     VMReg r_2 = regs[i].second();
 936     if (!r_1->is_valid()) {
 937       assert(!r_2->is_valid(), "");
 938       continue;
 939     }
 940     if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
 941       r_1 = F8->as_VMReg();        // as part of the load/store shuffle
 942       if (r_2->is_valid()) r_2 = r_1->next();
 943     }
 944     if (r_1->is_Register()) {  // Register argument
 945       Register r = r_1->as_Register()->after_restore();
 946       if (!r_2->is_valid()) {
 947         __ ld(Gargs, arg_slot(ld_off), r);
 948       } else {
 949 #ifdef _LP64
 950         // In V9, longs are given 2 64-bit slots in the interpreter, but the
 951         // data is passed in only 1 slot.
 952         RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
 953               next_arg_slot(ld_off) : arg_slot(ld_off);
 954         __ ldx(Gargs, slot, r);
 955 #else
 956         fatal("longs should be on stack");
 957 #endif
 958       }
 959     } else {
 960       assert(r_1->is_FloatRegister(), "");
 961       if (!r_2->is_valid()) {
 962         __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_1->as_FloatRegister());
 963       } else {
 964 #ifdef _LP64
 965         // In V9, doubles are given 2 64-bit slots in the interpreter, but the
 966         // data is passed in only 1 slot.  This code also handles longs that
 967         // are passed on the stack, but need a stack-to-stack move through a
 968         // spare float register.
 969         RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
 970               next_arg_slot(ld_off) : arg_slot(ld_off);
 971         __ ldf(FloatRegisterImpl::D, Gargs,                  slot, r_1->as_FloatRegister());
 972 #else
 973         // Need to marshal 64-bit value from misaligned Lesp loads
 974         __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
 975         __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_2->as_FloatRegister());
 976 #endif
 977       }
 978     }
 979     // Was the argument really intended to be on the stack, but was loaded
 980     // into F8/F9?
 981     if (regs[i].first()->is_stack()) {
 982       assert(r_1->as_FloatRegister() == F8, "fix this code");
 983       // Convert stack slot to an SP offset
 984       int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
 985       // Store down the shuffled stack word.  Target address _is_ aligned.
 986       RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
 987       if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
 988       else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
 989     }
 990   }
 991 
 992   // Jump to the compiled code just as if compiled code was doing it.
 993   __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
 994 #if INCLUDE_JVMCI
 995   if (EnableJVMCI) {
 996     // check if this call should be routed towards a specific entry point
 997     __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
 998     __ cmp(G0, G1);
 999     Label no_alternative_target;
1000     __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
1001     __ delayed()->nop();
1002 
1003     __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
1004     __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1005 
1006     __ bind(no_alternative_target);
1007   }
1008 #endif // INCLUDE_JVMCI
1009 
1010   // 6243940 We might end up in handle_wrong_method if
1011   // the callee is deoptimized as we race thru here. If that
1012   // happens we don't want to take a safepoint because the
1013   // caller frame will look interpreted and arguments are now
1014   // "compiled" so it is much better to make this transition
1015   // invisible to the stack walking code. Unfortunately if
1016   // we try and find the callee by normal means a safepoint
1017   // is possible. So we stash the desired callee in the thread
1018   // and the vm will find there should this case occur.
1019   Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
1020   __ st_ptr(G5_method, callee_target_addr);
1021   __ jmpl(G3, 0, G0);
1022   __ delayed()->nop();
1023 }
1024 
1025 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1026                                     int total_args_passed,
1027                                     int comp_args_on_stack,
1028                                     const BasicType *sig_bt,
1029                                     const VMRegPair *regs) {
1030   AdapterGenerator agen(masm);
1031   agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
1032 }
1033 
1034 // ---------------------------------------------------------------
1035 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1036                                                             int total_args_passed,
1037                                                             // VMReg max_arg,
1038                                                             int comp_args_on_stack, // VMRegStackSlots
1039                                                             const BasicType *sig_bt,
1040                                                             const VMRegPair *regs,
1041                                                             AdapterFingerPrint* fingerprint) {
1042   address i2c_entry = __ pc();
1043 
1044   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
1045 
1046 
1047   // -------------------------------------------------------------------------
1048   // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
1049   // args start out packed in the compiled layout.  They need to be unpacked
1050   // into the interpreter layout.  This will almost always require some stack
1051   // space.  We grow the current (compiled) stack, then repack the args.  We
1052   // finally end in a jump to the generic interpreter entry point.  On exit
1053   // from the interpreter, the interpreter will restore our SP (lest the
1054   // compiled code, which relys solely on SP and not FP, get sick).
1055 
1056   address c2i_unverified_entry = __ pc();
1057   Label L_skip_fixup;
1058   {
1059     Register R_temp = G1;  // another scratch register
1060 
1061     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1062 
1063     __ verify_oop(O0);
1064     __ load_klass(O0, G3_scratch);
1065 
1066     __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
1067     __ cmp(G3_scratch, R_temp);
1068 
1069     Label ok, ok2;
1070     __ brx(Assembler::equal, false, Assembler::pt, ok);
1071     __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
1072     __ jump_to(ic_miss, G3_scratch);
1073     __ delayed()->nop();
1074 
1075     __ bind(ok);
1076     // Method might have been compiled since the call site was patched to
1077     // interpreted if that is the case treat it as a miss so we can get
1078     // the call site corrected.
1079     __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
1080     __ bind(ok2);
1081     __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
1082     __ delayed()->nop();
1083     __ jump_to(ic_miss, G3_scratch);
1084     __ delayed()->nop();
1085 
1086   }
1087 
1088   address c2i_entry = __ pc();
1089   AdapterGenerator agen(masm);
1090   agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);
1091 
1092   __ flush();
1093   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1094 
1095 }
1096 
1097 // Helper function for native calling conventions
1098 static VMReg int_stk_helper( int i ) {
1099   // Bias any stack based VMReg we get by ignoring the window area
1100   // but not the register parameter save area.
1101   //
1102   // This is strange for the following reasons. We'd normally expect
1103   // the calling convention to return an VMReg for a stack slot
1104   // completely ignoring any abi reserved area. C2 thinks of that
1105   // abi area as only out_preserve_stack_slots. This does not include
1106   // the area allocated by the C abi to store down integer arguments
1107   // because the java calling convention does not use it. So
1108   // since c2 assumes that there are only out_preserve_stack_slots
1109   // to bias the optoregs (which impacts VMRegs) when actually referencing any actual stack
1110   // location the c calling convention must add in this bias amount
1111   // to make up for the fact that the out_preserve_stack_slots is
1112   // insufficient for C calls. What a mess. I sure hope those 6
1113   // stack words were worth it on every java call!
1114 
1115   // Another way of cleaning this up would be for out_preserve_stack_slots
1116   // to take a parameter to say whether it was C or java calling conventions.
1117   // Then things might look a little better (but not much).
1118 
1119   int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1120   if( mem_parm_offset < 0 ) {
1121     return as_oRegister(i)->as_VMReg();
1122   } else {
1123     int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1124     // Now return a biased offset that will be correct when out_preserve_slots is added back in
1125     return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1126   }
1127 }
1128 
1129 
1130 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1131                                          VMRegPair *regs,
1132                                          VMRegPair *regs2,
1133                                          int total_args_passed) {
1134     assert(regs2 == NULL, "not needed on sparc");
1135 
1136     // Return the number of VMReg stack_slots needed for the args.
1137     // This value does not include an abi space (like register window
1138     // save area).
1139 
1140     // The native convention is V8 if !LP64
1141     // The LP64 convention is the V9 convention which is slightly more sane.
1142 
1143     // We return the amount of VMReg stack slots we need to reserve for all
1144     // the arguments NOT counting out_preserve_stack_slots. Since we always
1145     // have space for storing at least 6 registers to memory we start with that.
1146     // See int_stk_helper for a further discussion.
1147     int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
1148 
1149 #ifdef _LP64
1150     // V9 convention: All things "as-if" on double-wide stack slots.
1151     // Hoist any int/ptr/long's in the first 6 to int regs.
1152     // Hoist any flt/dbl's in the first 16 dbl regs.
1153     int j = 0;                  // Count of actual args, not HALVES
1154     VMRegPair param_array_reg;  // location of the argument in the parameter array
1155     for (int i = 0; i < total_args_passed; i++, j++) {
1156       param_array_reg.set_bad();
1157       switch (sig_bt[i]) {
1158       case T_BOOLEAN:
1159       case T_BYTE:
1160       case T_CHAR:
1161       case T_INT:
1162       case T_SHORT:
1163         regs[i].set1(int_stk_helper(j));
1164         break;
1165       case T_LONG:
1166         assert(sig_bt[i+1] == T_VOID, "expecting half");
1167       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1168       case T_ARRAY:
1169       case T_OBJECT:
1170       case T_METADATA:
1171         regs[i].set2(int_stk_helper(j));
1172         break;
1173       case T_FLOAT:
1174         // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
1175         // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
1176         //
1177         // "When a callee prototype exists, and does not indicate variable arguments,
1178         // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
1179         // will be promoted to floating-point registers"
1180         //
1181         // By "promoted" it means that the argument is located in two places, an unused
1182         // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
1183         // float register.  In most cases, there are 6 or fewer arguments of any type,
1184         // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
1185         // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
1186         // require slots beyond that (up to %sp+BIAS+248).
1187         //
1188         {
1189           // V9ism: floats go in ODD registers and stack slots
1190           int float_index = 1 + (j << 1);
1191           param_array_reg.set1(VMRegImpl::stack2reg(float_index));
1192           if (j < 16) {
1193             regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
1194           } else {
1195             regs[i] = param_array_reg;
1196           }
1197         }
1198         break;
1199       case T_DOUBLE:
1200         {
1201           assert(sig_bt[i + 1] == T_VOID, "expecting half");
1202           // V9ism: doubles go in EVEN/ODD regs and stack slots
1203           int double_index = (j << 1);
1204           param_array_reg.set2(VMRegImpl::stack2reg(double_index));
1205           if (j < 16) {
1206             regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
1207           } else {
1208             // V9ism: doubles go in EVEN/ODD stack slots
1209             regs[i] = param_array_reg;
1210           }
1211         }
1212         break;
1213       case T_VOID:
1214         regs[i].set_bad();
1215         j--;
1216         break; // Do not count HALVES
1217       default:
1218         ShouldNotReachHere();
1219       }
1220       // Keep track of the deepest parameter array slot.
1221       if (!param_array_reg.first()->is_valid()) {
1222         param_array_reg = regs[i];
1223       }
1224       if (param_array_reg.first()->is_stack()) {
1225         int off = param_array_reg.first()->reg2stack();
1226         if (off > max_stack_slots) max_stack_slots = off;
1227       }
1228       if (param_array_reg.second()->is_stack()) {
1229         int off = param_array_reg.second()->reg2stack();
1230         if (off > max_stack_slots) max_stack_slots = off;
1231       }
1232     }
1233 
1234 #else // _LP64
1235     // V8 convention: first 6 things in O-regs, rest on stack.
1236     // Alignment is willy-nilly.
1237     for (int i = 0; i < total_args_passed; i++) {
1238       switch (sig_bt[i]) {
1239       case T_ADDRESS: // raw pointers, like current thread, for VM calls
1240       case T_ARRAY:
1241       case T_BOOLEAN:
1242       case T_BYTE:
1243       case T_CHAR:
1244       case T_FLOAT:
1245       case T_INT:
1246       case T_OBJECT:
1247       case T_METADATA:
1248       case T_SHORT:
1249         regs[i].set1(int_stk_helper(i));
1250         break;
1251       case T_DOUBLE:
1252       case T_LONG:
1253         assert(sig_bt[i + 1] == T_VOID, "expecting half");
1254         regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
1255         break;
1256       case T_VOID: regs[i].set_bad(); break;
1257       default:
1258         ShouldNotReachHere();
1259       }
1260       if (regs[i].first()->is_stack()) {
1261         int off = regs[i].first()->reg2stack();
1262         if (off > max_stack_slots) max_stack_slots = off;
1263       }
1264       if (regs[i].second()->is_stack()) {
1265         int off = regs[i].second()->reg2stack();
1266         if (off > max_stack_slots) max_stack_slots = off;
1267       }
1268     }
1269 #endif // _LP64
1270 
1271   return round_to(max_stack_slots + 1, 2);
1272 
1273 }
1274 
1275 
1276 // ---------------------------------------------------------------------------
1277 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1278   switch (ret_type) {
1279   case T_FLOAT:
1280     __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1281     break;
1282   case T_DOUBLE:
1283     __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1284     break;
1285   }
1286 }
1287 
1288 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1289   switch (ret_type) {
1290   case T_FLOAT:
1291     __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1292     break;
1293   case T_DOUBLE:
1294     __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1295     break;
1296   }
1297 }
1298 
1299 // Check and forward and pending exception.  Thread is stored in
1300 // L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
1301 // is no exception handler.  We merely pop this frame off and throw the
1302 // exception in the caller's frame.
1303 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1304   Label L;
1305   __ br_null(Rex_oop, false, Assembler::pt, L);
1306   __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1307   // Since this is a native call, we *know* the proper exception handler
1308   // without calling into the VM: it's the empty function.  Just pop this
1309   // frame and then jump to forward_exception_entry; O7 will contain the
1310   // native caller's return PC.
1311  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1312   __ jump_to(exception_entry, G3_scratch);
1313   __ delayed()->restore();      // Pop this frame off.
1314   __ bind(L);
1315 }
1316 
1317 // A simple move of integer like type
1318 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1319   if (src.first()->is_stack()) {
1320     if (dst.first()->is_stack()) {
1321       // stack to stack
1322       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1323       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1324     } else {
1325       // stack to reg
1326       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1327     }
1328   } else if (dst.first()->is_stack()) {
1329     // reg to stack
1330     __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1331   } else {
1332     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1333   }
1334 }
1335 
1336 // On 64 bit we will store integer like items to the stack as
1337 // 64 bits items (sparc abi) even though java would only store
1338 // 32bits for a parameter. On 32bit it will simply be 32 bits
1339 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
1340 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1341   if (src.first()->is_stack()) {
1342     if (dst.first()->is_stack()) {
1343       // stack to stack
1344       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1345       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1346     } else {
1347       // stack to reg
1348       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1349     }
1350   } else if (dst.first()->is_stack()) {
1351     // reg to stack
1352     // Some compilers (gcc) expect a clean 32 bit value on function entry
1353     __ signx(src.first()->as_Register(), L5);
1354     __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1355   } else {
1356     // Some compilers (gcc) expect a clean 32 bit value on function entry
1357     __ signx(src.first()->as_Register(), dst.first()->as_Register());
1358   }
1359 }
1360 
1361 
1362 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1363   if (src.first()->is_stack()) {
1364     if (dst.first()->is_stack()) {
1365       // stack to stack
1366       __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1367       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1368     } else {
1369       // stack to reg
1370       __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1371     }
1372   } else if (dst.first()->is_stack()) {
1373     // reg to stack
1374     __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1375   } else {
1376     __ mov(src.first()->as_Register(), dst.first()->as_Register());
1377   }
1378 }
1379 
1380 
1381 // An oop arg. Must pass a handle not the oop itself
1382 static void object_move(MacroAssembler* masm,
1383                         OopMap* map,
1384                         int oop_handle_offset,
1385                         int framesize_in_slots,
1386                         VMRegPair src,
1387                         VMRegPair dst,
1388                         bool is_receiver,
1389                         int* receiver_offset) {
1390 
1391   // must pass a handle. First figure out the location we use as a handle
1392 
1393   if (src.first()->is_stack()) {
1394     // Oop is already on the stack
1395     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1396     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1397     __ ld_ptr(rHandle, 0, L4);
1398 #ifdef _LP64
1399     __ movr( Assembler::rc_z, L4, G0, rHandle );
1400 #else
1401     __ tst( L4 );
1402     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1403 #endif
1404     if (dst.first()->is_stack()) {
1405       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1406     }
1407     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1408     if (is_receiver) {
1409       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1410     }
1411     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1412   } else {
1413     // Oop is in an input register pass we must flush it to the stack
1414     const Register rOop = src.first()->as_Register();
1415     const Register rHandle = L5;
1416     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1417     int offset = oop_slot * VMRegImpl::stack_slot_size;
1418     __ st_ptr(rOop, SP, offset + STACK_BIAS);
1419     if (is_receiver) {
1420        *receiver_offset = offset;
1421     }
1422     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1423     __ add(SP, offset + STACK_BIAS, rHandle);
1424 #ifdef _LP64
1425     __ movr( Assembler::rc_z, rOop, G0, rHandle );
1426 #else
1427     __ tst( rOop );
1428     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1429 #endif
1430 
1431     if (dst.first()->is_stack()) {
1432       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1433     } else {
1434       __ mov(rHandle, dst.first()->as_Register());
1435     }
1436   }
1437 }
1438 
1439 // A float arg may have to do float reg int reg conversion
1440 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1441   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1442 
1443   if (src.first()->is_stack()) {
1444     if (dst.first()->is_stack()) {
1445       // stack to stack the easiest of the bunch
1446       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1447       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1448     } else {
1449       // stack to reg
1450       if (dst.first()->is_Register()) {
1451         __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1452       } else {
1453         __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1454       }
1455     }
1456   } else if (dst.first()->is_stack()) {
1457     // reg to stack
1458     if (src.first()->is_Register()) {
1459       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1460     } else {
1461       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1462     }
1463   } else {
1464     // reg to reg
1465     if (src.first()->is_Register()) {
1466       if (dst.first()->is_Register()) {
1467         // gpr -> gpr
1468         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1469       } else {
1470         // gpr -> fpr
1471         __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1472         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1473       }
1474     } else if (dst.first()->is_Register()) {
1475       // fpr -> gpr
1476       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1477       __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1478     } else {
1479       // fpr -> fpr
1480       // In theory these overlap but the ordering is such that this is likely a nop
1481       if ( src.first() != dst.first()) {
1482         __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1483       }
1484     }
1485   }
1486 }
1487 
1488 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1489   VMRegPair src_lo(src.first());
1490   VMRegPair src_hi(src.second());
1491   VMRegPair dst_lo(dst.first());
1492   VMRegPair dst_hi(dst.second());
1493   simple_move32(masm, src_lo, dst_lo);
1494   simple_move32(masm, src_hi, dst_hi);
1495 }
1496 
1497 // A long move
1498 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1499 
1500   // Do the simple ones here else do two int moves
1501   if (src.is_single_phys_reg() ) {
1502     if (dst.is_single_phys_reg()) {
1503       __ mov(src.first()->as_Register(), dst.first()->as_Register());
1504     } else {
1505       // split src into two separate registers
1506       // Remember hi means hi address or lsw on sparc
1507       // Move msw to lsw
1508       if (dst.second()->is_reg()) {
1509         // MSW -> MSW
1510         __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1511         // Now LSW -> LSW
1512         // this will only move lo -> lo and ignore hi
1513         VMRegPair split(dst.second());
1514         simple_move32(masm, src, split);
1515       } else {
1516         VMRegPair split(src.first(), L4->as_VMReg());
1517         // MSW -> MSW (lo ie. first word)
1518         __ srax(src.first()->as_Register(), 32, L4);
1519         split_long_move(masm, split, dst);
1520       }
1521     }
1522   } else if (dst.is_single_phys_reg()) {
1523     if (src.is_adjacent_aligned_on_stack(2)) {
1524       __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1525     } else {
1526       // dst is a single reg.
1527       // Remember lo is low address not msb for stack slots
1528       // and lo is the "real" register for registers
1529       // src is
1530 
1531       VMRegPair split;
1532 
1533       if (src.first()->is_reg()) {
1534         // src.lo (msw) is a reg, src.hi is stk/reg
1535         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1536         split.set_pair(dst.first(), src.first());
1537       } else {
1538         // msw is stack move to L5
1539         // lsw is stack move to dst.lo (real reg)
1540         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1541         split.set_pair(dst.first(), L5->as_VMReg());
1542       }
1543 
1544       // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1545       // msw   -> src.lo/L5,  lsw -> dst.lo
1546       split_long_move(masm, src, split);
1547 
1548       // So dst now has the low order correct position the
1549       // msw half
1550       __ sllx(split.first()->as_Register(), 32, L5);
1551 
1552       const Register d = dst.first()->as_Register();
1553       __ or3(L5, d, d);
1554     }
1555   } else {
1556     // For LP64 we can probably do better.
1557     split_long_move(masm, src, dst);
1558   }
1559 }
1560 
1561 // A double move
1562 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1563 
1564   // The painful thing here is that like long_move a VMRegPair might be
1565   // 1: a single physical register
1566   // 2: two physical registers (v8)
1567   // 3: a physical reg [lo] and a stack slot [hi] (v8)
1568   // 4: two stack slots
1569 
1570   // Since src is always a java calling convention we know that the src pair
1571   // is always either all registers or all stack (and aligned?)
1572 
1573   // in a register [lo] and a stack slot [hi]
1574   if (src.first()->is_stack()) {
1575     if (dst.first()->is_stack()) {
1576       // stack to stack the easiest of the bunch
1577       // ought to be a way to do this where if alignment is ok we use ldd/std when possible
1578       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1579       __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1580       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1581       __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1582     } else {
1583       // stack to reg
1584       if (dst.second()->is_stack()) {
1585         // stack -> reg, stack -> stack
1586         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1587         if (dst.first()->is_Register()) {
1588           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1589         } else {
1590           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1591         }
1592         // This was missing. (very rare case)
1593         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1594       } else {
1595         // stack -> reg
1596         // Eventually optimize for alignment QQQ
1597         if (dst.first()->is_Register()) {
1598           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1599           __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1600         } else {
1601           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1602           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1603         }
1604       }
1605     }
1606   } else if (dst.first()->is_stack()) {
1607     // reg to stack
1608     if (src.first()->is_Register()) {
1609       // Eventually optimize for alignment QQQ
1610       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1611       if (src.second()->is_stack()) {
1612         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1613         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1614       } else {
1615         __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1616       }
1617     } else {
1618       // fpr to stack
1619       if (src.second()->is_stack()) {
1620         ShouldNotReachHere();
1621       } else {
1622         // Is the stack aligned?
1623         if (reg2offset(dst.first()) & 0x7) {
1624           // No do as pairs
1625           __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1626           __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1627         } else {
1628           __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1629         }
1630       }
1631     }
1632   } else {
1633     // reg to reg
1634     if (src.first()->is_Register()) {
1635       if (dst.first()->is_Register()) {
1636         // gpr -> gpr
1637         __ mov(src.first()->as_Register(), dst.first()->as_Register());
1638         __ mov(src.second()->as_Register(), dst.second()->as_Register());
1639       } else {
1640         // gpr -> fpr
1641         // ought to be able to do a single store
1642         __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1643         __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1644         // ought to be able to do a single load
1645         __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1646         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1647       }
1648     } else if (dst.first()->is_Register()) {
1649       // fpr -> gpr
1650       // ought to be able to do a single store
1651       __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1652       // ought to be able to do a single load
1653       // REMEMBER first() is low address not LSB
1654       __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1655       if (dst.second()->is_Register()) {
1656         __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1657       } else {
1658         __ ld(FP, -4 + STACK_BIAS, L4);
1659         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1660       }
1661     } else {
1662       // fpr -> fpr
1663       // In theory these overlap but the ordering is such that this is likely a nop
1664       if ( src.first() != dst.first()) {
1665         __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1666       }
1667     }
1668   }
1669 }
1670 
1671 // Creates an inner frame if one hasn't already been created, and
1672 // saves a copy of the thread in L7_thread_cache
1673 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1674   if (!*already_created) {
1675     __ save_frame(0);
1676     // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1677     // Don't use save_thread because it smashes G2 and we merely want to save a
1678     // copy
1679     __ mov(G2_thread, L7_thread_cache);
1680     *already_created = true;
1681   }
1682 }
1683 
1684 
1685 static void save_or_restore_arguments(MacroAssembler* masm,
1686                                       const int stack_slots,
1687                                       const int total_in_args,
1688                                       const int arg_save_area,
1689                                       OopMap* map,
1690                                       VMRegPair* in_regs,
1691                                       BasicType* in_sig_bt) {
1692   // if map is non-NULL then the code should store the values,
1693   // otherwise it should load them.
1694   if (map != NULL) {
1695     // Fill in the map
1696     for (int i = 0; i < total_in_args; i++) {
1697       if (in_sig_bt[i] == T_ARRAY) {
1698         if (in_regs[i].first()->is_stack()) {
1699           int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1700           map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1701         } else if (in_regs[i].first()->is_Register()) {
1702           map->set_oop(in_regs[i].first());
1703         } else {
1704           ShouldNotReachHere();
1705         }
1706       }
1707     }
1708   }
1709 
1710   // Save or restore double word values
1711   int handle_index = 0;
1712   for (int i = 0; i < total_in_args; i++) {
1713     int slot = handle_index + arg_save_area;
1714     int offset = slot * VMRegImpl::stack_slot_size;
1715     if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1716       const Register reg = in_regs[i].first()->as_Register();
1717       if (reg->is_global()) {
1718         handle_index += 2;
1719         assert(handle_index <= stack_slots, "overflow");
1720         if (map != NULL) {
1721           __ stx(reg, SP, offset + STACK_BIAS);
1722         } else {
1723           __ ldx(SP, offset + STACK_BIAS, reg);
1724         }
1725       }
1726     } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1727       handle_index += 2;
1728       assert(handle_index <= stack_slots, "overflow");
1729       if (map != NULL) {
1730         __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1731       } else {
1732         __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1733       }
1734     }
1735   }
1736   // Save floats
1737   for (int i = 0; i < total_in_args; i++) {
1738     int slot = handle_index + arg_save_area;
1739     int offset = slot * VMRegImpl::stack_slot_size;
1740     if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1741       handle_index++;
1742       assert(handle_index <= stack_slots, "overflow");
1743       if (map != NULL) {
1744         __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1745       } else {
1746         __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1747       }
1748     }
1749   }
1750 
1751 }
1752 
1753 
1754 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1755 // keeps a new JNI critical region from starting until a GC has been
1756 // forced.  Save down any oops in registers and describe them in an
1757 // OopMap.
1758 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1759                                                const int stack_slots,
1760                                                const int total_in_args,
1761                                                const int arg_save_area,
1762                                                OopMapSet* oop_maps,
1763                                                VMRegPair* in_regs,
1764                                                BasicType* in_sig_bt) {
1765   __ block_comment("check GCLocker::needs_gc");
1766   Label cont;
1767   AddressLiteral sync_state(GCLocker::needs_gc_address());
1768   __ load_bool_contents(sync_state, G3_scratch);
1769   __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1770   __ delayed()->nop();
1771 
1772   // Save down any values that are live in registers and call into the
1773   // runtime to halt for a GC
1774   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1775   save_or_restore_arguments(masm, stack_slots, total_in_args,
1776                             arg_save_area, map, in_regs, in_sig_bt);
1777 
1778   __ mov(G2_thread, L7_thread_cache);
1779 
1780   __ set_last_Java_frame(SP, noreg);
1781 
1782   __ block_comment("block_for_jni_critical");
1783   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1784   __ delayed()->mov(L7_thread_cache, O0);
1785   oop_maps->add_gc_map( __ offset(), map);
1786 
1787   __ restore_thread(L7_thread_cache); // restore G2_thread
1788   __ reset_last_Java_frame();
1789 
1790   // Reload all the register arguments
1791   save_or_restore_arguments(masm, stack_slots, total_in_args,
1792                             arg_save_area, NULL, in_regs, in_sig_bt);
1793 
1794   __ bind(cont);
1795 #ifdef ASSERT
1796   if (StressCriticalJNINatives) {
1797     // Stress register saving
1798     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1799     save_or_restore_arguments(masm, stack_slots, total_in_args,
1800                               arg_save_area, map, in_regs, in_sig_bt);
1801     // Destroy argument registers
1802     for (int i = 0; i < total_in_args; i++) {
1803       if (in_regs[i].first()->is_Register()) {
1804         const Register reg = in_regs[i].first()->as_Register();
1805         if (reg->is_global()) {
1806           __ mov(G0, reg);
1807         }
1808       } else if (in_regs[i].first()->is_FloatRegister()) {
1809         __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1810       }
1811     }
1812 
1813     save_or_restore_arguments(masm, stack_slots, total_in_args,
1814                               arg_save_area, NULL, in_regs, in_sig_bt);
1815   }
1816 #endif
1817 }
1818 
1819 // Unpack an array argument into a pointer to the body and the length
1820 // if the array is non-null, otherwise pass 0 for both.
1821 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1822   // Pass the length, ptr pair
1823   Label is_null, done;
1824   if (reg.first()->is_stack()) {
1825     VMRegPair tmp  = reg64_to_VMRegPair(L2);
1826     // Load the arg up from the stack
1827     move_ptr(masm, reg, tmp);
1828     reg = tmp;
1829   }
1830   __ cmp(reg.first()->as_Register(), G0);
1831   __ brx(Assembler::equal, false, Assembler::pt, is_null);
1832   __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1833   move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1834   __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1835   move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1836   __ ba_short(done);
1837   __ bind(is_null);
1838   // Pass zeros
1839   move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1840   move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1841   __ bind(done);
1842 }
1843 
1844 static void verify_oop_args(MacroAssembler* masm,
1845                             methodHandle method,
1846                             const BasicType* sig_bt,
1847                             const VMRegPair* regs) {
1848   Register temp_reg = G5_method;  // not part of any compiled calling seq
1849   if (VerifyOops) {
1850     for (int i = 0; i < method->size_of_parameters(); i++) {
1851       if (sig_bt[i] == T_OBJECT ||
1852           sig_bt[i] == T_ARRAY) {
1853         VMReg r = regs[i].first();
1854         assert(r->is_valid(), "bad oop arg");
1855         if (r->is_stack()) {
1856           RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1857           ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1858           __ ld_ptr(SP, ld_off, temp_reg);
1859           __ verify_oop(temp_reg);
1860         } else {
1861           __ verify_oop(r->as_Register());
1862         }
1863       }
1864     }
1865   }
1866 }
1867 
1868 static void gen_special_dispatch(MacroAssembler* masm,
1869                                  methodHandle method,
1870                                  const BasicType* sig_bt,
1871                                  const VMRegPair* regs) {
1872   verify_oop_args(masm, method, sig_bt, regs);
1873   vmIntrinsics::ID iid = method->intrinsic_id();
1874 
1875   // Now write the args into the outgoing interpreter space
1876   bool     has_receiver   = false;
1877   Register receiver_reg   = noreg;
1878   int      member_arg_pos = -1;
1879   Register member_reg     = noreg;
1880   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1881   if (ref_kind != 0) {
1882     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1883     member_reg = G5_method;  // known to be free at this point
1884     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1885   } else if (iid == vmIntrinsics::_invokeBasic) {
1886     has_receiver = true;
1887   } else {
1888     fatal("unexpected intrinsic id %d", iid);
1889   }
1890 
1891   if (member_reg != noreg) {
1892     // Load the member_arg into register, if necessary.
1893     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1894     VMReg r = regs[member_arg_pos].first();
1895     if (r->is_stack()) {
1896       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1897       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1898       __ ld_ptr(SP, ld_off, member_reg);
1899     } else {
1900       // no data motion is needed
1901       member_reg = r->as_Register();
1902     }
1903   }
1904 
1905   if (has_receiver) {
1906     // Make sure the receiver is loaded into a register.
1907     assert(method->size_of_parameters() > 0, "oob");
1908     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1909     VMReg r = regs[0].first();
1910     assert(r->is_valid(), "bad receiver arg");
1911     if (r->is_stack()) {
1912       // Porting note:  This assumes that compiled calling conventions always
1913       // pass the receiver oop in a register.  If this is not true on some
1914       // platform, pick a temp and load the receiver from stack.
1915       fatal("receiver always in a register");
1916       receiver_reg = G3_scratch;  // known to be free at this point
1917       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1918       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1919       __ ld_ptr(SP, ld_off, receiver_reg);
1920     } else {
1921       // no data motion is needed
1922       receiver_reg = r->as_Register();
1923     }
1924   }
1925 
1926   // Figure out which address we are really jumping to:
1927   MethodHandles::generate_method_handle_dispatch(masm, iid,
1928                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1929 }
1930 
1931 // ---------------------------------------------------------------------------
1932 // Generate a native wrapper for a given method.  The method takes arguments
1933 // in the Java compiled code convention, marshals them to the native
1934 // convention (handlizes oops, etc), transitions to native, makes the call,
1935 // returns to java state (possibly blocking), unhandlizes any result and
1936 // returns.
1937 //
1938 // Critical native functions are a shorthand for the use of
1939 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1940 // functions.  The wrapper is expected to unpack the arguments before
1941 // passing them to the callee and perform checks before and after the
1942 // native call to ensure that they GCLocker
1943 // lock_critical/unlock_critical semantics are followed.  Some other
1944 // parts of JNI setup are skipped like the tear down of the JNI handle
1945 // block and the check for pending exceptions it's impossible for them
1946 // to be thrown.
1947 //
1948 // They are roughly structured like this:
1949 //    if (GCLocker::needs_gc())
1950 //      SharedRuntime::block_for_jni_critical();
1951 //    tranistion to thread_in_native
1952 //    unpack arrray arguments and call native entry point
1953 //    check for safepoint in progress
1954 //    check if any thread suspend flags are set
1955 //      call into JVM and possible unlock the JNI critical
1956 //      if a GC was suppressed while in the critical native.
1957 //    transition back to thread_in_Java
1958 //    return to caller
1959 //
1960 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1961                                                 const methodHandle& method,
1962                                                 int compile_id,
1963                                                 BasicType* in_sig_bt,
1964                                                 VMRegPair* in_regs,
1965                                                 BasicType ret_type) {
1966   if (method->is_method_handle_intrinsic()) {
1967     vmIntrinsics::ID iid = method->intrinsic_id();
1968     intptr_t start = (intptr_t)__ pc();
1969     int vep_offset = ((intptr_t)__ pc()) - start;
1970     gen_special_dispatch(masm,
1971                          method,
1972                          in_sig_bt,
1973                          in_regs);
1974     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1975     __ flush();
1976     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1977     return nmethod::new_native_nmethod(method,
1978                                        compile_id,
1979                                        masm->code(),
1980                                        vep_offset,
1981                                        frame_complete,
1982                                        stack_slots / VMRegImpl::slots_per_word,
1983                                        in_ByteSize(-1),
1984                                        in_ByteSize(-1),
1985                                        (OopMapSet*)NULL);
1986   }
1987   bool is_critical_native = true;
1988   address native_func = method->critical_native_function();
1989   if (native_func == NULL) {
1990     native_func = method->native_function();
1991     is_critical_native = false;
1992   }
1993   assert(native_func != NULL, "must have function");
1994 
1995   // Native nmethod wrappers never take possesion of the oop arguments.
1996   // So the caller will gc the arguments. The only thing we need an
1997   // oopMap for is if the call is static
1998   //
1999   // An OopMap for lock (and class if static), and one for the VM call itself
2000   OopMapSet *oop_maps = new OopMapSet();
2001   intptr_t start = (intptr_t)__ pc();
2002 
2003   // First thing make an ic check to see if we should even be here
2004   {
2005     Label L;
2006     const Register temp_reg = G3_scratch;
2007     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2008     __ verify_oop(O0);
2009     __ load_klass(O0, temp_reg);
2010     __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
2011 
2012     __ jump_to(ic_miss, temp_reg);
2013     __ delayed()->nop();
2014     __ align(CodeEntryAlignment);
2015     __ bind(L);
2016   }
2017 
2018   int vep_offset = ((intptr_t)__ pc()) - start;
2019 
2020 #ifdef COMPILER1
2021   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2022     // Object.hashCode, System.identityHashCode can pull the hashCode from the
2023     // header word instead of doing a full VM transition once it's been computed.
2024     // Since hashCode is usually polymorphic at call sites we can't do this
2025     // optimization at the call site without a lot of work.
2026     Label slowCase;
2027     Label done;
2028     Register obj_reg              = O0;
2029     Register result               = O0;
2030     Register header               = G3_scratch;
2031     Register hash                 = G3_scratch; // overwrite header value with hash value
2032     Register mask                 = G1;         // to get hash field from header
2033 
2034     // Unlike for Object.hashCode, System.identityHashCode is static method and
2035     // gets object as argument instead of the receiver.
2036     if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
2037       assert(method->is_static(), "method should be static");
2038       // return 0 for null reference input
2039       __ br_null(obj_reg, false, Assembler::pn, done);
2040       __ delayed()->mov(obj_reg, hash);
2041     }
2042 
2043     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
2044     // We depend on hash_mask being at most 32 bits and avoid the use of
2045     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2046     // vm: see markOop.hpp.
2047     __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
2048     __ sethi(markOopDesc::hash_mask, mask);
2049     __ btst(markOopDesc::unlocked_value, header);
2050     __ br(Assembler::zero, false, Assembler::pn, slowCase);
2051     if (UseBiasedLocking) {
2052       // Check if biased and fall through to runtime if so
2053       __ delayed()->nop();
2054       __ btst(markOopDesc::biased_lock_bit_in_place, header);
2055       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2056     }
2057     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2058 
2059     // Check for a valid (non-zero) hash code and get its value.
2060 #ifdef _LP64
2061     __ srlx(header, markOopDesc::hash_shift, hash);
2062 #else
2063     __ srl(header, markOopDesc::hash_shift, hash);
2064 #endif
2065     __ andcc(hash, mask, hash);
2066     __ br(Assembler::equal, false, Assembler::pn, slowCase);
2067     __ delayed()->nop();
2068 
2069     // leaf return.
2070     __ bind(done);
2071     __ retl();
2072     __ delayed()->mov(hash, result);
2073     __ bind(slowCase);
2074   }
2075 #endif // COMPILER1
2076 
2077 
2078   // We have received a description of where all the java arg are located
2079   // on entry to the wrapper. We need to convert these args to where
2080   // the jni function will expect them. To figure out where they go
2081   // we convert the java signature to a C signature by inserting
2082   // the hidden arguments as arg[0] and possibly arg[1] (static method)
2083 
2084   const int total_in_args = method->size_of_parameters();
2085   int total_c_args = total_in_args;
2086   int total_save_slots = 6 * VMRegImpl::slots_per_word;
2087   if (!is_critical_native) {
2088     total_c_args += 1;
2089     if (method->is_static()) {
2090       total_c_args++;
2091     }
2092   } else {
2093     for (int i = 0; i < total_in_args; i++) {
2094       if (in_sig_bt[i] == T_ARRAY) {
2095         // These have to be saved and restored across the safepoint
2096         total_c_args++;
2097       }
2098     }
2099   }
2100 
2101   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2102   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2103   BasicType* in_elem_bt = NULL;
2104 
2105   int argc = 0;
2106   if (!is_critical_native) {
2107     out_sig_bt[argc++] = T_ADDRESS;
2108     if (method->is_static()) {
2109       out_sig_bt[argc++] = T_OBJECT;
2110     }
2111 
2112     for (int i = 0; i < total_in_args ; i++ ) {
2113       out_sig_bt[argc++] = in_sig_bt[i];
2114     }
2115   } else {
2116     Thread* THREAD = Thread::current();
2117     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2118     SignatureStream ss(method->signature());
2119     for (int i = 0; i < total_in_args ; i++ ) {
2120       if (in_sig_bt[i] == T_ARRAY) {
2121         // Arrays are passed as int, elem* pair
2122         out_sig_bt[argc++] = T_INT;
2123         out_sig_bt[argc++] = T_ADDRESS;
2124         Symbol* atype = ss.as_symbol(CHECK_NULL);
2125         const char* at = atype->as_C_string();
2126         if (strlen(at) == 2) {
2127           assert(at[0] == '[', "must be");
2128           switch (at[1]) {
2129             case 'B': in_elem_bt[i]  = T_BYTE; break;
2130             case 'C': in_elem_bt[i]  = T_CHAR; break;
2131             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
2132             case 'F': in_elem_bt[i]  = T_FLOAT; break;
2133             case 'I': in_elem_bt[i]  = T_INT; break;
2134             case 'J': in_elem_bt[i]  = T_LONG; break;
2135             case 'S': in_elem_bt[i]  = T_SHORT; break;
2136             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
2137             default: ShouldNotReachHere();
2138           }
2139         }
2140       } else {
2141         out_sig_bt[argc++] = in_sig_bt[i];
2142         in_elem_bt[i] = T_VOID;
2143       }
2144       if (in_sig_bt[i] != T_VOID) {
2145         assert(in_sig_bt[i] == ss.type(), "must match");
2146         ss.next();
2147       }
2148     }
2149   }
2150 
2151   // Now figure out where the args must be stored and how much stack space
2152   // they require (neglecting out_preserve_stack_slots but space for storing
2153   // the 1st six register arguments). It's weird see int_stk_helper.
2154   //
2155   int out_arg_slots;
2156   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2157 
2158   if (is_critical_native) {
2159     // Critical natives may have to call out so they need a save area
2160     // for register arguments.
2161     int double_slots = 0;
2162     int single_slots = 0;
2163     for ( int i = 0; i < total_in_args; i++) {
2164       if (in_regs[i].first()->is_Register()) {
2165         const Register reg = in_regs[i].first()->as_Register();
2166         switch (in_sig_bt[i]) {
2167           case T_ARRAY:
2168           case T_BOOLEAN:
2169           case T_BYTE:
2170           case T_SHORT:
2171           case T_CHAR:
2172           case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
2173           case T_LONG: if (reg->is_global()) double_slots++; break;
2174           default:  ShouldNotReachHere();
2175         }
2176       } else if (in_regs[i].first()->is_FloatRegister()) {
2177         switch (in_sig_bt[i]) {
2178           case T_FLOAT:  single_slots++; break;
2179           case T_DOUBLE: double_slots++; break;
2180           default:  ShouldNotReachHere();
2181         }
2182       }
2183     }
2184     total_save_slots = double_slots * 2 + single_slots;
2185   }
2186 
2187   // Compute framesize for the wrapper.  We need to handlize all oops in
2188   // registers. We must create space for them here that is disjoint from
2189   // the windowed save area because we have no control over when we might
2190   // flush the window again and overwrite values that gc has since modified.
2191   // (The live window race)
2192   //
2193   // We always just allocate 6 word for storing down these object. This allow
2194   // us to simply record the base and use the Ireg number to decide which
2195   // slot to use. (Note that the reg number is the inbound number not the
2196   // outbound number).
2197   // We must shuffle args to match the native convention, and include var-args space.
2198 
2199   // Calculate the total number of stack slots we will need.
2200 
2201   // First count the abi requirement plus all of the outgoing args
2202   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2203 
2204   // Now the space for the inbound oop handle area
2205 
2206   int oop_handle_offset = round_to(stack_slots, 2);
2207   stack_slots += total_save_slots;
2208 
2209   // Now any space we need for handlizing a klass if static method
2210 
2211   int klass_slot_offset = 0;
2212   int klass_offset = -1;
2213   int lock_slot_offset = 0;
2214   bool is_static = false;
2215 
2216   if (method->is_static()) {
2217     klass_slot_offset = stack_slots;
2218     stack_slots += VMRegImpl::slots_per_word;
2219     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2220     is_static = true;
2221   }
2222 
2223   // Plus a lock if needed
2224 
2225   if (method->is_synchronized()) {
2226     lock_slot_offset = stack_slots;
2227     stack_slots += VMRegImpl::slots_per_word;
2228   }
2229 
2230   // Now a place to save return value or as a temporary for any gpr -> fpr moves
2231   stack_slots += 2;
2232 
2233   // Ok The space we have allocated will look like:
2234   //
2235   //
2236   // FP-> |                     |
2237   //      |---------------------|
2238   //      | 2 slots for moves   |
2239   //      |---------------------|
2240   //      | lock box (if sync)  |
2241   //      |---------------------| <- lock_slot_offset
2242   //      | klass (if static)   |
2243   //      |---------------------| <- klass_slot_offset
2244   //      | oopHandle area      |
2245   //      |---------------------| <- oop_handle_offset
2246   //      | outbound memory     |
2247   //      | based arguments     |
2248   //      |                     |
2249   //      |---------------------|
2250   //      | vararg area         |
2251   //      |---------------------|
2252   //      |                     |
2253   // SP-> | out_preserved_slots |
2254   //
2255   //
2256 
2257 
2258   // Now compute actual number of stack words we need rounding to make
2259   // stack properly aligned.
2260   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2261 
2262   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2263 
2264   // Generate stack overflow check before creating frame
2265   __ generate_stack_overflow_check(stack_size);
2266 
2267   // Generate a new frame for the wrapper.
2268   __ save(SP, -stack_size, SP);
2269 
2270   int frame_complete = ((intptr_t)__ pc()) - start;
2271 
2272   __ verify_thread();
2273 
2274   if (is_critical_native) {
2275     check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
2276                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2277   }
2278 
2279   //
2280   // We immediately shuffle the arguments so that any vm call we have to
2281   // make from here on out (sync slow path, jvmti, etc.) we will have
2282   // captured the oops from our caller and have a valid oopMap for
2283   // them.
2284 
2285   // -----------------
2286   // The Grand Shuffle
2287   //
2288   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2289   // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2290   // the class mirror instead of a receiver.  This pretty much guarantees that
2291   // register layout will not match.  We ignore these extra arguments during
2292   // the shuffle. The shuffle is described by the two calling convention
2293   // vectors we have in our possession. We simply walk the java vector to
2294   // get the source locations and the c vector to get the destinations.
2295   // Because we have a new window and the argument registers are completely
2296   // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2297   // here.
2298 
2299   // This is a trick. We double the stack slots so we can claim
2300   // the oops in the caller's frame. Since we are sure to have
2301   // more args than the caller doubling is enough to make
2302   // sure we can capture all the incoming oop args from the
2303   // caller.
2304   //
2305   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2306   // Record sp-based slot for receiver on stack for non-static methods
2307   int receiver_offset = -1;
2308 
2309   // We move the arguments backward because the floating point registers
2310   // destination will always be to a register with a greater or equal register
2311   // number or the stack.
2312 
2313 #ifdef ASSERT
2314   bool reg_destroyed[RegisterImpl::number_of_registers];
2315   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2316   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2317     reg_destroyed[r] = false;
2318   }
2319   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2320     freg_destroyed[f] = false;
2321   }
2322 
2323 #endif /* ASSERT */
2324 
2325   for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2326 
2327 #ifdef ASSERT
2328     if (in_regs[i].first()->is_Register()) {
2329       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2330     } else if (in_regs[i].first()->is_FloatRegister()) {
2331       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2332     }
2333     if (out_regs[c_arg].first()->is_Register()) {
2334       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2335     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2336       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2337     }
2338 #endif /* ASSERT */
2339 
2340     switch (in_sig_bt[i]) {
2341       case T_ARRAY:
2342         if (is_critical_native) {
2343           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2344           c_arg--;
2345           break;
2346         }
2347       case T_OBJECT:
2348         assert(!is_critical_native, "no oop arguments");
2349         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2350                     ((i == 0) && (!is_static)),
2351                     &receiver_offset);
2352         break;
2353       case T_VOID:
2354         break;
2355 
2356       case T_FLOAT:
2357         float_move(masm, in_regs[i], out_regs[c_arg]);
2358         break;
2359 
2360       case T_DOUBLE:
2361         assert( i + 1 < total_in_args &&
2362                 in_sig_bt[i + 1] == T_VOID &&
2363                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2364         double_move(masm, in_regs[i], out_regs[c_arg]);
2365         break;
2366 
2367       case T_LONG :
2368         long_move(masm, in_regs[i], out_regs[c_arg]);
2369         break;
2370 
2371       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2372 
2373       default:
2374         move32_64(masm, in_regs[i], out_regs[c_arg]);
2375     }
2376   }
2377 
2378   // Pre-load a static method's oop into O1.  Used both by locking code and
2379   // the normal JNI call code.
2380   if (method->is_static() && !is_critical_native) {
2381     __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2382 
2383     // Now handlize the static class mirror in O1.  It's known not-null.
2384     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2385     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2386     __ add(SP, klass_offset + STACK_BIAS, O1);
2387   }
2388 
2389 
2390   const Register L6_handle = L6;
2391 
2392   if (method->is_synchronized()) {
2393     assert(!is_critical_native, "unhandled");
2394     __ mov(O1, L6_handle);
2395   }
2396 
2397   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
2398   // except O6/O7. So if we must call out we must push a new frame. We immediately
2399   // push a new frame and flush the windows.
2400 #ifdef _LP64
2401   intptr_t thepc = (intptr_t) __ pc();
2402   {
2403     address here = __ pc();
2404     // Call the next instruction
2405     __ call(here + 8, relocInfo::none);
2406     __ delayed()->nop();
2407   }
2408 #else
2409   intptr_t thepc = __ load_pc_address(O7, 0);
2410 #endif /* _LP64 */
2411 
2412   // We use the same pc/oopMap repeatedly when we call out
2413   oop_maps->add_gc_map(thepc - start, map);
2414 
2415   // O7 now has the pc loaded that we will use when we finally call to native.
2416 
2417   // Save thread in L7; it crosses a bunch of VM calls below
2418   // Don't use save_thread because it smashes G2 and we merely
2419   // want to save a copy
2420   __ mov(G2_thread, L7_thread_cache);
2421 
2422 
2423   // If we create an inner frame once is plenty
2424   // when we create it we must also save G2_thread
2425   bool inner_frame_created = false;
2426 
2427   // dtrace method entry support
2428   {
2429     SkipIfEqual skip_if(
2430       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2431     // create inner frame
2432     __ save_frame(0);
2433     __ mov(G2_thread, L7_thread_cache);
2434     __ set_metadata_constant(method(), O1);
2435     __ call_VM_leaf(L7_thread_cache,
2436          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2437          G2_thread, O1);
2438     __ restore();
2439   }
2440 
2441   // RedefineClasses() tracing support for obsolete method entry
2442   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2443     // create inner frame
2444     __ save_frame(0);
2445     __ mov(G2_thread, L7_thread_cache);
2446     __ set_metadata_constant(method(), O1);
2447     __ call_VM_leaf(L7_thread_cache,
2448          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2449          G2_thread, O1);
2450     __ restore();
2451   }
2452 
2453   // We are in the jni frame unless saved_frame is true in which case
2454   // we are in one frame deeper (the "inner" frame). If we are in the
2455   // "inner" frames the args are in the Iregs and if the jni frame then
2456   // they are in the Oregs.
2457   // If we ever need to go to the VM (for locking, jvmti) then
2458   // we will always be in the "inner" frame.
2459 
2460   // Lock a synchronized method
2461   int lock_offset = -1;         // Set if locked
2462   if (method->is_synchronized()) {
2463     Register Roop = O1;
2464     const Register L3_box = L3;
2465 
2466     create_inner_frame(masm, &inner_frame_created);
2467 
2468     __ ld_ptr(I1, 0, O1);
2469     Label done;
2470 
2471     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2472     __ add(FP, lock_offset+STACK_BIAS, L3_box);
2473 #ifdef ASSERT
2474     if (UseBiasedLocking) {
2475       // making the box point to itself will make it clear it went unused
2476       // but also be obviously invalid
2477       __ st_ptr(L3_box, L3_box, 0);
2478     }
2479 #endif // ASSERT
2480     //
2481     // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2482     //
2483     __ compiler_lock_object(Roop, L1,    L3_box, L2);
2484     __ br(Assembler::equal, false, Assembler::pt, done);
2485     __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2486 
2487 
2488     // None of the above fast optimizations worked so we have to get into the
2489     // slow case of monitor enter.  Inline a special case of call_VM that
2490     // disallows any pending_exception.
2491     __ mov(Roop, O0);            // Need oop in O0
2492     __ mov(L3_box, O1);
2493 
2494     // Record last_Java_sp, in case the VM code releases the JVM lock.
2495 
2496     __ set_last_Java_frame(FP, I7);
2497 
2498     // do the call
2499     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2500     __ delayed()->mov(L7_thread_cache, O2);
2501 
2502     __ restore_thread(L7_thread_cache); // restore G2_thread
2503     __ reset_last_Java_frame();
2504 
2505 #ifdef ASSERT
2506     { Label L;
2507     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2508     __ br_null_short(O0, Assembler::pt, L);
2509     __ stop("no pending exception allowed on exit from IR::monitorenter");
2510     __ bind(L);
2511     }
2512 #endif
2513     __ bind(done);
2514   }
2515 
2516 
2517   // Finally just about ready to make the JNI call
2518 
2519   __ flushw();
2520   if (inner_frame_created) {
2521     __ restore();
2522   } else {
2523     // Store only what we need from this frame
2524     // QQQ I think that non-v9 (like we care) we don't need these saves
2525     // either as the flush traps and the current window goes too.
2526     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2527     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2528   }
2529 
2530   // get JNIEnv* which is first argument to native
2531   if (!is_critical_native) {
2532     __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2533   }
2534 
2535   // Use that pc we placed in O7 a while back as the current frame anchor
2536   __ set_last_Java_frame(SP, O7);
2537 
2538   // We flushed the windows ages ago now mark them as flushed before transitioning.
2539   __ set(JavaFrameAnchor::flushed, G3_scratch);
2540   __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2541 
2542   // Transition from _thread_in_Java to _thread_in_native.
2543   __ set(_thread_in_native, G3_scratch);
2544 
2545 #ifdef _LP64
2546   AddressLiteral dest(native_func);
2547   __ relocate(relocInfo::runtime_call_type);
2548   __ jumpl_to(dest, O7, O7);
2549 #else
2550   __ call(native_func, relocInfo::runtime_call_type);
2551 #endif
2552   __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2553 
2554   __ restore_thread(L7_thread_cache); // restore G2_thread
2555 
2556   // Unpack native results.  For int-types, we do any needed sign-extension
2557   // and move things into I0.  The return value there will survive any VM
2558   // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2559   // specially in the slow-path code.
2560   switch (ret_type) {
2561   case T_VOID:    break;        // Nothing to do!
2562   case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2563   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
2564   // In 64 bits build result is in O0, in O0, O1 in 32bit build
2565   case T_LONG:
2566 #ifndef _LP64
2567                   __ mov(O1, I1);
2568 #endif
2569                   // Fall thru
2570   case T_OBJECT:                // Really a handle
2571   case T_ARRAY:
2572   case T_INT:
2573                   __ mov(O0, I0);
2574                   break;
2575   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2576   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2577   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2578   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2579     break;                      // Cannot de-handlize until after reclaiming jvm_lock
2580   default:
2581     ShouldNotReachHere();
2582   }
2583 
2584   Label after_transition;
2585   // must we block?
2586 
2587   // Block, if necessary, before resuming in _thread_in_Java state.
2588   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2589   { Label no_block;
2590     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2591 
2592     // Switch thread to "native transition" state before reading the synchronization state.
2593     // This additional state is necessary because reading and testing the synchronization
2594     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2595     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2596     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2597     //     Thread A is resumed to finish this native method, but doesn't block here since it
2598     //     didn't see any synchronization is progress, and escapes.
2599     __ set(_thread_in_native_trans, G3_scratch);
2600     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2601     if(os::is_MP()) {
2602       if (UseMembar) {
2603         // Force this write out before the read below
2604         __ membar(Assembler::StoreLoad);
2605       } else {
2606         // Write serialization page so VM thread can do a pseudo remote membar.
2607         // We use the current thread pointer to calculate a thread specific
2608         // offset to write to within the page. This minimizes bus traffic
2609         // due to cache line collision.
2610         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2611       }
2612     }
2613     __ load_contents(sync_state, G3_scratch);
2614     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2615 
2616     Label L;
2617     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2618     __ br(Assembler::notEqual, false, Assembler::pn, L);
2619     __ delayed()->ld(suspend_state, G3_scratch);
2620     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2621     __ bind(L);
2622 
2623     // Block.  Save any potential method result value before the operation and
2624     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2625     // lets us share the oopMap we used when we went native rather the create
2626     // a distinct one for this pc
2627     //
2628     save_native_result(masm, ret_type, stack_slots);
2629     if (!is_critical_native) {
2630       __ call_VM_leaf(L7_thread_cache,
2631                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2632                       G2_thread);
2633     } else {
2634       __ call_VM_leaf(L7_thread_cache,
2635                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2636                       G2_thread);
2637     }
2638 
2639     // Restore any method result value
2640     restore_native_result(masm, ret_type, stack_slots);
2641 
2642     if (is_critical_native) {
2643       // The call above performed the transition to thread_in_Java so
2644       // skip the transition logic below.
2645       __ ba(after_transition);
2646       __ delayed()->nop();
2647     }
2648 
2649     __ bind(no_block);
2650   }
2651 
2652   // thread state is thread_in_native_trans. Any safepoint blocking has already
2653   // happened so we can now change state to _thread_in_Java.
2654   __ set(_thread_in_Java, G3_scratch);
2655   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2656   __ bind(after_transition);
2657 
2658   Label no_reguard;
2659   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2660   __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2661 
2662     save_native_result(masm, ret_type, stack_slots);
2663   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2664   __ delayed()->nop();
2665 
2666   __ restore_thread(L7_thread_cache); // restore G2_thread
2667     restore_native_result(masm, ret_type, stack_slots);
2668 
2669   __ bind(no_reguard);
2670 
2671   // Handle possible exception (will unlock if necessary)
2672 
2673   // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2674 
2675   // Unlock
2676   if (method->is_synchronized()) {
2677     Label done;
2678     Register I2_ex_oop = I2;
2679     const Register L3_box = L3;
2680     // Get locked oop from the handle we passed to jni
2681     __ ld_ptr(L6_handle, 0, L4);
2682     __ add(SP, lock_offset+STACK_BIAS, L3_box);
2683     // Must save pending exception around the slow-path VM call.  Since it's a
2684     // leaf call, the pending exception (if any) can be kept in a register.
2685     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2686     // Now unlock
2687     //                       (Roop, Rmark, Rbox,   Rscratch)
2688     __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2689     __ br(Assembler::equal, false, Assembler::pt, done);
2690     __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
2691 
2692     // save and restore any potential method result value around the unlocking
2693     // operation.  Will save in I0 (or stack for FP returns).
2694     save_native_result(masm, ret_type, stack_slots);
2695 
2696     // Must clear pending-exception before re-entering the VM.  Since this is
2697     // a leaf call, pending-exception-oop can be safely kept in a register.
2698     __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2699 
2700     // slow case of monitor enter.  Inline a special case of call_VM that
2701     // disallows any pending_exception.
2702     __ mov(L3_box, O1);
2703 
2704     // Pass in current thread pointer
2705     __ mov(G2_thread, O2);
2706 
2707     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2708     __ delayed()->mov(L4, O0);              // Need oop in O0
2709 
2710     __ restore_thread(L7_thread_cache); // restore G2_thread
2711 
2712 #ifdef ASSERT
2713     { Label L;
2714     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2715     __ br_null_short(O0, Assembler::pt, L);
2716     __ stop("no pending exception allowed on exit from IR::monitorexit");
2717     __ bind(L);
2718     }
2719 #endif
2720     restore_native_result(masm, ret_type, stack_slots);
2721     // check_forward_pending_exception jump to forward_exception if any pending
2722     // exception is set.  The forward_exception routine expects to see the
2723     // exception in pending_exception and not in a register.  Kind of clumsy,
2724     // since all folks who branch to forward_exception must have tested
2725     // pending_exception first and hence have it in a register already.
2726     __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2727     __ bind(done);
2728   }
2729 
2730   // Tell dtrace about this method exit
2731   {
2732     SkipIfEqual skip_if(
2733       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2734     save_native_result(masm, ret_type, stack_slots);
2735     __ set_metadata_constant(method(), O1);
2736     __ call_VM_leaf(L7_thread_cache,
2737        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2738        G2_thread, O1);
2739     restore_native_result(masm, ret_type, stack_slots);
2740   }
2741 
2742   // Clear "last Java frame" SP and PC.
2743   __ verify_thread(); // G2_thread must be correct
2744   __ reset_last_Java_frame();
2745 
2746   // Unpack oop result
2747   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2748       Label L;
2749       __ addcc(G0, I0, G0);
2750       __ brx(Assembler::notZero, true, Assembler::pt, L);
2751       __ delayed()->ld_ptr(I0, 0, I0);
2752       __ mov(G0, I0);
2753       __ bind(L);
2754       __ verify_oop(I0);
2755   }
2756 
2757   if (!is_critical_native) {
2758     // reset handle block
2759     __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2760     __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2761 
2762     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2763     check_forward_pending_exception(masm, G3_scratch);
2764   }
2765 
2766 
2767   // Return
2768 
2769 #ifndef _LP64
2770   if (ret_type == T_LONG) {
2771 
2772     // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2773     __ sllx(I0, 32, G1);          // Shift bits into high G1
2774     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
2775     __ or3 (I1, G1, G1);          // OR 64 bits into G1
2776   }
2777 #endif
2778 
2779   __ ret();
2780   __ delayed()->restore();
2781 
2782   __ flush();
2783 
2784   nmethod *nm = nmethod::new_native_nmethod(method,
2785                                             compile_id,
2786                                             masm->code(),
2787                                             vep_offset,
2788                                             frame_complete,
2789                                             stack_slots / VMRegImpl::slots_per_word,
2790                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2791                                             in_ByteSize(lock_offset),
2792                                             oop_maps);
2793 
2794   if (is_critical_native) {
2795     nm->set_lazy_critical_native(true);
2796   }
2797   return nm;
2798 
2799 }
2800 
2801 // this function returns the adjust size (in number of words) to a c2i adapter
2802 // activation for use during deoptimization
2803 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2804   assert(callee_locals >= callee_parameters,
2805           "test and remove; got more parms than locals");
2806   if (callee_locals < callee_parameters)
2807     return 0;                   // No adjustment for negative locals
2808   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2809   return round_to(diff, WordsPerLong);
2810 }
2811 
2812 // "Top of Stack" slots that may be unused by the calling convention but must
2813 // otherwise be preserved.
2814 // On Intel these are not necessary and the value can be zero.
2815 // On Sparc this describes the words reserved for storing a register window
2816 // when an interrupt occurs.
2817 uint SharedRuntime::out_preserve_stack_slots() {
2818   return frame::register_save_words * VMRegImpl::slots_per_word;
2819 }
2820 
2821 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
2822 //
2823 // Common out the new frame generation for deopt and uncommon trap
2824 //
2825   Register        G3pcs              = G3_scratch; // Array of new pcs (input)
2826   Register        Oreturn0           = O0;
2827   Register        Oreturn1           = O1;
2828   Register        O2UnrollBlock      = O2;
2829   Register        O3array            = O3;         // Array of frame sizes (input)
2830   Register        O4array_size       = O4;         // number of frames (input)
2831   Register        O7frame_size       = O7;         // number of frames (input)
2832 
2833   __ ld_ptr(O3array, 0, O7frame_size);
2834   __ sub(G0, O7frame_size, O7frame_size);
2835   __ save(SP, O7frame_size, SP);
2836   __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
2837 
2838   #ifdef ASSERT
2839   // make sure that the frames are aligned properly
2840 #ifndef _LP64
2841   __ btst(wordSize*2-1, SP);
2842   __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
2843 #endif
2844   #endif
2845 
2846   // Deopt needs to pass some extra live values from frame to frame
2847 
2848   if (deopt) {
2849     __ mov(Oreturn0->after_save(), Oreturn0);
2850     __ mov(Oreturn1->after_save(), Oreturn1);
2851   }
2852 
2853   __ mov(O4array_size->after_save(), O4array_size);
2854   __ sub(O4array_size, 1, O4array_size);
2855   __ mov(O3array->after_save(), O3array);
2856   __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
2857   __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
2858 
2859   #ifdef ASSERT
2860   // trash registers to show a clear pattern in backtraces
2861   __ set(0xDEAD0000, I0);
2862   __ add(I0,  2, I1);
2863   __ add(I0,  4, I2);
2864   __ add(I0,  6, I3);
2865   __ add(I0,  8, I4);
2866   // Don't touch I5 could have valuable savedSP
2867   __ set(0xDEADBEEF, L0);
2868   __ mov(L0, L1);
2869   __ mov(L0, L2);
2870   __ mov(L0, L3);
2871   __ mov(L0, L4);
2872   __ mov(L0, L5);
2873 
2874   // trash the return value as there is nothing to return yet
2875   __ set(0xDEAD0001, O7);
2876   #endif
2877 
2878   __ mov(SP, O5_savedSP);
2879 }
2880 
2881 
2882 static void make_new_frames(MacroAssembler* masm, bool deopt) {
2883   //
2884   // loop through the UnrollBlock info and create new frames
2885   //
2886   Register        G3pcs              = G3_scratch;
2887   Register        Oreturn0           = O0;
2888   Register        Oreturn1           = O1;
2889   Register        O2UnrollBlock      = O2;
2890   Register        O3array            = O3;
2891   Register        O4array_size       = O4;
2892   Label           loop;
2893 
2894 #ifdef ASSERT
2895   // Compilers generate code that bang the stack by as much as the
2896   // interpreter would need. So this stack banging should never
2897   // trigger a fault. Verify that it does not on non product builds.
2898   if (UseStackBanging) {
2899     // Get total frame size for interpreted frames
2900     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
2901     __ bang_stack_size(O4, O3, G3_scratch);
2902   }
2903 #endif
2904 
2905   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
2906   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
2907   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
2908 
2909   // Adjust old interpreter frame to make space for new frame's extra java locals
2910   //
2911   // We capture the original sp for the transition frame only because it is needed in
2912   // order to properly calculate interpreter_sp_adjustment. Even though in real life
2913   // every interpreter frame captures a savedSP it is only needed at the transition
2914   // (fortunately). If we had to have it correct everywhere then we would need to
2915   // be told the sp_adjustment for each frame we create. If the frame size array
2916   // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
2917   // for each frame we create and keep up the illusion every where.
2918   //
2919 
2920   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
2921   __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
2922   __ sub(SP, O7, SP);
2923 
2924 #ifdef ASSERT
2925   // make sure that there is at least one entry in the array
2926   __ tst(O4array_size);
2927   __ breakpoint_trap(Assembler::zero, Assembler::icc);
2928 #endif
2929 
2930   // Now push the new interpreter frames
2931   __ bind(loop);
2932 
2933   // allocate a new frame, filling the registers
2934 
2935   gen_new_frame(masm, deopt);        // allocate an interpreter frame
2936 
2937   __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
2938   __ delayed()->add(O3array, wordSize, O3array);
2939   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
2940 
2941 }
2942 
2943 //------------------------------generate_deopt_blob----------------------------
2944 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
2945 // instead.
2946 void SharedRuntime::generate_deopt_blob() {
2947   // allocate space for the code
2948   ResourceMark rm;
2949   // setup code generation tools
2950   int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
2951 #ifdef ASSERT
2952   if (UseStackBanging) {
2953     pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
2954   }
2955 #endif
2956 #if INCLUDE_JVMCI
2957   if (EnableJVMCI) {
2958     pad += 1000; // Increase the buffer size when compiling for JVMCI
2959   }
2960 #endif
2961 #ifdef _LP64
2962   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
2963 #else
2964   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
2965   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
2966   CodeBuffer buffer("deopt_blob", 1600+pad, 512);
2967 #endif /* _LP64 */
2968   MacroAssembler* masm               = new MacroAssembler(&buffer);
2969   FloatRegister   Freturn0           = F0;
2970   Register        Greturn1           = G1;
2971   Register        Oreturn0           = O0;
2972   Register        Oreturn1           = O1;
2973   Register        O2UnrollBlock      = O2;
2974   Register        L0deopt_mode       = L0;
2975   Register        G4deopt_mode       = G4_scratch;
2976   int             frame_size_words;
2977   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
2978 #if !defined(_LP64) && defined(COMPILER2)
2979   Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
2980 #endif
2981   Label           cont;
2982 
2983   OopMapSet *oop_maps = new OopMapSet();
2984 
2985   //
2986   // This is the entry point for code which is returning to a de-optimized
2987   // frame.
2988   // The steps taken by this frame are as follows:
2989   //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
2990   //     and all potentially live registers (at a pollpoint many registers can be live).
2991   //
2992   //   - call the C routine: Deoptimization::fetch_unroll_info (this function
2993   //     returns information about the number and size of interpreter frames
2994   //     which are equivalent to the frame which is being deoptimized)
2995   //   - deallocate the unpack frame, restoring only results values. Other
2996   //     volatile registers will now be captured in the vframeArray as needed.
2997   //   - deallocate the deoptimization frame
2998   //   - in a loop using the information returned in the previous step
2999   //     push new interpreter frames (take care to propagate the return
3000   //     values through each new frame pushed)
3001   //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
3002   //   - call the C routine: Deoptimization::unpack_frames (this function
3003   //     lays out values on the interpreter frame which was just created)
3004   //   - deallocate the dummy unpack_frame
3005   //   - ensure that all the return values are correctly set and then do
3006   //     a return to the interpreter entry point
3007   //
3008   // Refer to the following methods for more information:
3009   //   - Deoptimization::fetch_unroll_info
3010   //   - Deoptimization::unpack_frames
3011 
3012   OopMap* map = NULL;
3013 
3014   int start = __ offset();
3015 
3016   // restore G2, the trampoline destroyed it
3017   __ get_thread();
3018 
3019   // On entry we have been called by the deoptimized nmethod with a call that
3020   // replaced the original call (or safepoint polling location) so the deoptimizing
3021   // pc is now in O7. Return values are still in the expected places
3022 
3023   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3024   __ ba(cont);
3025   __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
3026 
3027 
3028 #if INCLUDE_JVMCI
3029   Label after_fetch_unroll_info_call;
3030   int implicit_exception_uncommon_trap_offset = 0;
3031   int uncommon_trap_offset = 0;
3032 
3033   if (EnableJVMCI) {
3034     masm->block_comment("BEGIN implicit_exception_uncommon_trap");
3035     implicit_exception_uncommon_trap_offset = __ offset() - start;
3036 
3037     __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
3038     __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
3039     __ add(O7, -8, O7);
3040 
3041     uncommon_trap_offset = __ offset() - start;
3042 
3043     // Save everything in sight.
3044     (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3045     __ set_last_Java_frame(SP, NULL);
3046 
3047     __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
3048     __ sub(G0, 1, L1);
3049     __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));
3050 
3051     __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
3052     __ mov(G2_thread, O0);
3053     __ mov(L0deopt_mode, O2);
3054     __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
3055     __ delayed()->nop();
3056     oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
3057     __ get_thread();
3058     __ add(O7, 8, O7);
3059     __ reset_last_Java_frame();
3060 
3061     __ ba(after_fetch_unroll_info_call);
3062     __ delayed()->nop(); // Delay slot
3063     masm->block_comment("END implicit_exception_uncommon_trap");
3064   } // EnableJVMCI
3065 #endif // INCLUDE_JVMCI
3066 
3067   int exception_offset = __ offset() - start;
3068 
3069   // restore G2, the trampoline destroyed it
3070   __ get_thread();
3071 
3072   // On entry we have been jumped to by the exception handler (or exception_blob
3073   // for server).  O0 contains the exception oop and O7 contains the original
3074   // exception pc.  So if we push a frame here it will look to the
3075   // stack walking code (fetch_unroll_info) just like a normal call so
3076   // state will be extracted normally.
3077 
3078   // save exception oop in JavaThread and fall through into the
3079   // exception_in_tls case since they are handled in same way except
3080   // for where the pending exception is kept.
3081   __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3082 
3083   //
3084   // Vanilla deoptimization with an exception pending in exception_oop
3085   //
3086   int exception_in_tls_offset = __ offset() - start;
3087 
3088   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3089   // Opens a new stack frame
3090   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3091 
3092   // Restore G2_thread
3093   __ get_thread();
3094 
3095 #ifdef ASSERT
3096   {
3097     // verify that there is really an exception oop in exception_oop
3098     Label has_exception;
3099     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3100     __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3101     __ stop("no exception in thread");
3102     __ bind(has_exception);
3103 
3104     // verify that there is no pending exception
3105     Label no_pending_exception;
3106     Address exception_addr(G2_thread, Thread::pending_exception_offset());
3107     __ ld_ptr(exception_addr, Oexception);
3108     __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3109     __ stop("must not have pending exception here");
3110     __ bind(no_pending_exception);
3111   }
3112 #endif
3113 
3114   __ ba(cont);
3115   __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);;
3116 
3117   //
3118   // Reexecute entry, similar to c2 uncommon trap
3119   //
3120   int reexecute_offset = __ offset() - start;
3121 #if INCLUDE_JVMCI && !defined(COMPILER1)
3122   if (EnableJVMCI && UseJVMCICompiler) {
3123     // JVMCI does not use this kind of deoptimization
3124     __ should_not_reach_here();
3125   }
3126 #endif
3127   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
3128   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3129 
3130   __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3131 
3132   __ bind(cont);
3133 
3134   __ set_last_Java_frame(SP, noreg);
3135 
3136   // do the call by hand so we can get the oopmap
3137 
3138   __ mov(G2_thread, L7_thread_cache);
3139   __ mov(L0deopt_mode, O1);
3140   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3141   __ delayed()->mov(G2_thread, O0);
3142 
3143   // Set an oopmap for the call site this describes all our saved volatile registers
3144 
3145   oop_maps->add_gc_map( __ offset()-start, map);
3146 
3147   __ mov(L7_thread_cache, G2_thread);
3148 
3149   __ reset_last_Java_frame();
3150 
3151 #if INCLUDE_JVMCI
3152   if (EnableJVMCI) {
3153     __ bind(after_fetch_unroll_info_call);
3154   }
3155 #endif
3156   // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3157   // so this move will survive
3158 
3159   __ mov(L0deopt_mode, G4deopt_mode);
3160 
3161   __ mov(O0, O2UnrollBlock->after_save());
3162 
3163   RegisterSaver::restore_result_registers(masm);
3164 
3165   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
3166   Label noException;
3167   __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3168 
3169   // Move the pending exception from exception_oop to Oexception so
3170   // the pending exception will be picked up the interpreter.
3171   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3172   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3173   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
3174   __ bind(noException);
3175 
3176   // deallocate the deoptimization frame taking care to preserve the return values
3177   __ mov(Oreturn0,     Oreturn0->after_save());
3178   __ mov(Oreturn1,     Oreturn1->after_save());
3179   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3180   __ restore();
3181 
3182   // Allocate new interpreter frame(s) and possible c2i adapter frame
3183 
3184   make_new_frames(masm, true);
3185 
3186   // push a dummy "unpack_frame" taking care of float return values and
3187   // call Deoptimization::unpack_frames to have the unpacker layout
3188   // information in the interpreter frames just created and then return
3189   // to the interpreter entry point
3190   __ save(SP, -frame_size_words*wordSize, SP);
3191   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3192 #if !defined(_LP64)
3193 #if defined(COMPILER2)
3194   // 32-bit 1-register longs return longs in G1
3195   __ stx(Greturn1, saved_Greturn1_addr);
3196 #endif
3197   __ set_last_Java_frame(SP, noreg);
3198   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3199 #else
3200   // LP64 uses g4 in set_last_Java_frame
3201   __ mov(G4deopt_mode, O1);
3202   __ set_last_Java_frame(SP, G0);
3203   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3204 #endif
3205   __ reset_last_Java_frame();
3206   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3207 
3208 #if !defined(_LP64) && defined(COMPILER2)
3209   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3210   // I0/I1 if the return value is long.
3211   Label not_long;
3212   __ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3213   __ ldd(saved_Greturn1_addr,I0);
3214   __ bind(not_long);
3215 #endif
3216   __ ret();
3217   __ delayed()->restore();
3218 
3219   masm->flush();
3220   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3221   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3222 #if INCLUDE_JVMCI
3223   if (EnableJVMCI) {
3224     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3225     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3226   }
3227 #endif
3228 }
3229 
3230 #ifdef COMPILER2
3231 
3232 //------------------------------generate_uncommon_trap_blob--------------------
3233 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3234 // instead.
3235 void SharedRuntime::generate_uncommon_trap_blob() {
3236   // allocate space for the code
3237   ResourceMark rm;
3238   // setup code generation tools
3239   int pad = VerifyThread ? 512 : 0;
3240 #ifdef ASSERT
3241   if (UseStackBanging) {
3242     pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
3243   }
3244 #endif
3245 #ifdef _LP64
3246   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3247 #else
3248   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3249   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3250   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3251 #endif
3252   MacroAssembler* masm               = new MacroAssembler(&buffer);
3253   Register        O2UnrollBlock      = O2;
3254   Register        O2klass_index      = O2;
3255 
3256   //
3257   // This is the entry point for all traps the compiler takes when it thinks
3258   // it cannot handle further execution of compilation code. The frame is
3259   // deoptimized in these cases and converted into interpreter frames for
3260   // execution
3261   // The steps taken by this frame are as follows:
3262   //   - push a fake "unpack_frame"
3263   //   - call the C routine Deoptimization::uncommon_trap (this function
3264   //     packs the current compiled frame into vframe arrays and returns
3265   //     information about the number and size of interpreter frames which
3266   //     are equivalent to the frame which is being deoptimized)
3267   //   - deallocate the "unpack_frame"
3268   //   - deallocate the deoptimization frame
3269   //   - in a loop using the information returned in the previous step
3270   //     push interpreter frames;
3271   //   - create a dummy "unpack_frame"
3272   //   - call the C routine: Deoptimization::unpack_frames (this function
3273   //     lays out values on the interpreter frame which was just created)
3274   //   - deallocate the dummy unpack_frame
3275   //   - return to the interpreter entry point
3276   //
3277   //  Refer to the following methods for more information:
3278   //   - Deoptimization::uncommon_trap
3279   //   - Deoptimization::unpack_frame
3280 
3281   // the unloaded class index is in O0 (first parameter to this blob)
3282 
3283   // push a dummy "unpack_frame"
3284   // and call Deoptimization::uncommon_trap to pack the compiled frame into
3285   // vframe array and return the UnrollBlock information
3286   __ save_frame(0);
3287   __ set_last_Java_frame(SP, noreg);
3288   __ mov(I0, O2klass_index);
3289   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
3290   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
3291   __ reset_last_Java_frame();
3292   __ mov(O0, O2UnrollBlock->after_save());
3293   __ restore();
3294 
3295   // deallocate the deoptimized frame taking care to preserve the return values
3296   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3297   __ restore();
3298 
3299 #ifdef ASSERT
3300   { Label L;
3301     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
3302     __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
3303     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3304     __ bind(L);
3305   }
3306 #endif
3307 
3308   // Allocate new interpreter frame(s) and possible c2i adapter frame
3309 
3310   make_new_frames(masm, false);
3311 
3312   // push a dummy "unpack_frame" taking care of float return values and
3313   // call Deoptimization::unpack_frames to have the unpacker layout
3314   // information in the interpreter frames just created and then return
3315   // to the interpreter entry point
3316   __ save_frame(0);
3317   __ set_last_Java_frame(SP, noreg);
3318   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3319   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3320   __ reset_last_Java_frame();
3321   __ ret();
3322   __ delayed()->restore();
3323 
3324   masm->flush();
3325   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3326 }
3327 
3328 #endif // COMPILER2
3329 
3330 //------------------------------generate_handler_blob-------------------
3331 //
3332 // Generate a special Compile2Runtime blob that saves all registers, and sets
3333 // up an OopMap.
3334 //
3335 // This blob is jumped to (via a breakpoint and the signal handler) from a
3336 // safepoint in compiled code.  On entry to this blob, O7 contains the
3337 // address in the original nmethod at which we should resume normal execution.
3338 // Thus, this blob looks like a subroutine which must preserve lots of
3339 // registers and return normally.  Note that O7 is never register-allocated,
3340 // so it is guaranteed to be free here.
3341 //
3342 
3343 // The hardest part of what this blob must do is to save the 64-bit %o
3344 // registers in the 32-bit build.  A simple 'save' turn the %o's to %i's and
3345 // an interrupt will chop off their heads.  Making space in the caller's frame
3346 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
3347 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
3348 // SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
3349 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3350 // Tricky, tricky, tricky...
3351 
3352 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3353   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3354 
3355   // allocate space for the code
3356   ResourceMark rm;
3357   // setup code generation tools
3358   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3359   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3360   // even larger with TraceJumps
3361   int pad = TraceJumps ? 512 : 0;
3362   CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3363   MacroAssembler* masm                = new MacroAssembler(&buffer);
3364   int             frame_size_words;
3365   OopMapSet *oop_maps = new OopMapSet();
3366   OopMap* map = NULL;
3367 
3368   int start = __ offset();
3369 
3370   bool cause_return = (poll_type == POLL_AT_RETURN);
3371   // If this causes a return before the processing, then do a "restore"
3372   if (cause_return) {
3373     __ restore();
3374   } else {
3375     // Make it look like we were called via the poll
3376     // so that frame constructor always sees a valid return address
3377     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3378     __ sub(O7, frame::pc_return_offset, O7);
3379   }
3380 
3381   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3382 
3383   // setup last_Java_sp (blows G4)
3384   __ set_last_Java_frame(SP, noreg);
3385 
3386   // call into the runtime to handle illegal instructions exception
3387   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3388   __ mov(G2_thread, O0);
3389   __ save_thread(L7_thread_cache);
3390   __ call(call_ptr);
3391   __ delayed()->nop();
3392 
3393   // Set an oopmap for the call site.
3394   // We need this not only for callee-saved registers, but also for volatile
3395   // registers that the compiler might be keeping live across a safepoint.
3396 
3397   oop_maps->add_gc_map( __ offset() - start, map);
3398 
3399   __ restore_thread(L7_thread_cache);
3400   // clear last_Java_sp
3401   __ reset_last_Java_frame();
3402 
3403   // Check for exceptions
3404   Label pending;
3405 
3406   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3407   __ br_notnull_short(O1, Assembler::pn, pending);
3408 
3409   RegisterSaver::restore_live_registers(masm);
3410 
3411   // We are back the the original state on entry and ready to go.
3412 
3413   __ retl();
3414   __ delayed()->nop();
3415 
3416   // Pending exception after the safepoint
3417 
3418   __ bind(pending);
3419 
3420   RegisterSaver::restore_live_registers(masm);
3421 
3422   // We are back the the original state on entry.
3423 
3424   // Tail-call forward_exception_entry, with the issuing PC in O7,
3425   // so it looks like the original nmethod called forward_exception_entry.
3426   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3427   __ JMP(O0, 0);
3428   __ delayed()->nop();
3429 
3430   // -------------
3431   // make sure all code is generated
3432   masm->flush();
3433 
3434   // return exception blob
3435   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3436 }
3437 
3438 //
3439 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3440 //
3441 // Generate a stub that calls into vm to find out the proper destination
3442 // of a java call. All the argument registers are live at this point
3443 // but since this is generic code we don't know what they are and the caller
3444 // must do any gc of the args.
3445 //
3446 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3447   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3448 
3449   // allocate space for the code
3450   ResourceMark rm;
3451   // setup code generation tools
3452   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3453   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3454   // even larger with TraceJumps
3455   int pad = TraceJumps ? 512 : 0;
3456   CodeBuffer buffer(name, 1600 + pad, 512);
3457   MacroAssembler* masm                = new MacroAssembler(&buffer);
3458   int             frame_size_words;
3459   OopMapSet *oop_maps = new OopMapSet();
3460   OopMap* map = NULL;
3461 
3462   int start = __ offset();
3463 
3464   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3465 
3466   int frame_complete = __ offset();
3467 
3468   // setup last_Java_sp (blows G4)
3469   __ set_last_Java_frame(SP, noreg);
3470 
3471   // call into the runtime to handle illegal instructions exception
3472   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3473   __ mov(G2_thread, O0);
3474   __ save_thread(L7_thread_cache);
3475   __ call(destination, relocInfo::runtime_call_type);
3476   __ delayed()->nop();
3477 
3478   // O0 contains the address we are going to jump to assuming no exception got installed
3479 
3480   // Set an oopmap for the call site.
3481   // We need this not only for callee-saved registers, but also for volatile
3482   // registers that the compiler might be keeping live across a safepoint.
3483 
3484   oop_maps->add_gc_map( __ offset() - start, map);
3485 
3486   __ restore_thread(L7_thread_cache);
3487   // clear last_Java_sp
3488   __ reset_last_Java_frame();
3489 
3490   // Check for exceptions
3491   Label pending;
3492 
3493   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3494   __ br_notnull_short(O1, Assembler::pn, pending);
3495 
3496   // get the returned Method*
3497 
3498   __ get_vm_result_2(G5_method);
3499   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3500 
3501   // O0 is where we want to jump, overwrite G3 which is saved and scratch
3502 
3503   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3504 
3505   RegisterSaver::restore_live_registers(masm);
3506 
3507   // We are back the the original state on entry and ready to go.
3508 
3509   __ JMP(G3, 0);
3510   __ delayed()->nop();
3511 
3512   // Pending exception after the safepoint
3513 
3514   __ bind(pending);
3515 
3516   RegisterSaver::restore_live_registers(masm);
3517 
3518   // We are back the the original state on entry.
3519 
3520   // Tail-call forward_exception_entry, with the issuing PC in O7,
3521   // so it looks like the original nmethod called forward_exception_entry.
3522   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3523   __ JMP(O0, 0);
3524   __ delayed()->nop();
3525 
3526   // -------------
3527   // make sure all code is generated
3528   masm->flush();
3529 
3530   // return the  blob
3531   // frame_size_words or bytes??
3532   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3533 }