rev 5968 : 8031320: Use Intel RTM instructions for locks
Summary: Use RTM for inflated locks and stack locks.
Reviewed-by: iveresov, twisti, roland, dcubed

   1 /*
   2  * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "oops/compiledICHolder.hpp"
  33 #include "prims/jvmtiRedefineClassesTrace.hpp"
  34 #include "runtime/sharedRuntime.hpp"
  35 #include "runtime/vframeArray.hpp"
  36 #include "vmreg_x86.inline.hpp"
  37 #ifdef COMPILER1
  38 #include "c1/c1_Runtime1.hpp"
  39 #endif
  40 #ifdef COMPILER2
  41 #include "opto/runtime.hpp"
  42 #endif
  43 
  44 #define __ masm->
  45 
  46 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  47 
  48 class RegisterSaver {
  49   // Capture info about frame layout
  50 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  51   enum layout {
  52                 fpu_state_off = 0,
  53                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  54                 st0_off, st0H_off,
  55                 st1_off, st1H_off,
  56                 st2_off, st2H_off,
  57                 st3_off, st3H_off,
  58                 st4_off, st4H_off,
  59                 st5_off, st5H_off,
  60                 st6_off, st6H_off,
  61                 st7_off, st7H_off,
  62                 xmm_off,
  63                 DEF_XMM_OFFS(0),
  64                 DEF_XMM_OFFS(1),
  65                 DEF_XMM_OFFS(2),
  66                 DEF_XMM_OFFS(3),
  67                 DEF_XMM_OFFS(4),
  68                 DEF_XMM_OFFS(5),
  69                 DEF_XMM_OFFS(6),
  70                 DEF_XMM_OFFS(7),
  71                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  72                 rdi_off,
  73                 rsi_off,
   74                 ignore_off,  // extra copy of rbp
  75                 rsp_off,
  76                 rbx_off,
  77                 rdx_off,
  78                 rcx_off,
  79                 rax_off,
  80                 // The frame sender code expects that rbp will be in the "natural" place and
  81                 // will override any oopMap setting for it. We must therefore force the layout
  82                 // so that it agrees with the frame sender code.
  83                 rbp_off,
  84                 return_off,      // slot for return address
  85                 reg_save_size };
  86   enum { FPU_regs_live = flags_off - fpu_state_end };
  87 
  88   public:
  89 
  90   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
  91                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  92   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
  93 
  94   static int rax_offset() { return rax_off; }
  95   static int rbx_offset() { return rbx_off; }
  96 
  97   // Offsets into the register save area
  98   // Used by deoptimization when it is managing result register
  99   // values on its own
 100 
 101   static int raxOffset(void) { return rax_off; }
 102   static int rdxOffset(void) { return rdx_off; }
 103   static int rbxOffset(void) { return rbx_off; }
 104   static int xmm0Offset(void) { return xmm0_off; }
  105   // This really returns a slot in the fp save area; which one is not important
 106   static int fpResultOffset(void) { return st0_off; }
 107 
  108   // During deoptimization only the result registers need to be restored;
  109   // all the other values have already been extracted.
 110 
 111   static void restore_result_registers(MacroAssembler* masm);
 112 
 113 };
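
Taken together, the layout enum and the push sequence below imply roughly the
following frame picture after save_live_registers (an illustrative sketch,
assuming UseSSE >= 2; the enum above is the authoritative layout):

//   -- higher addresses --
//   return address                          (return_off, pushed by the caller's call)
//   saved rbp                               (rbp_off, pushed by enter)
//   rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi  (pusha; the second rbp is ignore_off)
//   EFLAGS                                  (flags_off, pushed by pushf)
//   16-byte alignment fill word
//   xmm7 .. xmm0                            (16 bytes each)
//   st7 .. st0                              (8 bytes each, in deopt-able form)
//   FPU state                               (FPUStateSizeInWords == 27 words)
//   -- rsp after save_live_registers --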
 114 
 115 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 116                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 117   int vect_words = 0;
 118 #ifdef COMPILER2
 119   if (save_vectors) {
 120     assert(UseAVX > 0, "256bit vectors are supported only with AVX");
 121     assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
  122     // Save upper half of YMM registers
 123     vect_words = 8 * 16 / wordSize;
 124     additional_frame_words += vect_words;
 125   }
 126 #else
 127   assert(!save_vectors, "vectors are generated only by C2");
 128 #endif
 129   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 130   int frame_words = frame_size_in_bytes / wordSize;
 131   *total_frame_words = frame_words;
 132 
 133   assert(FPUStateSizeInWords == 27, "update stack layout");
 134 
  135   // Save registers, FPU state, and flags.
  136   // We assume the caller has already pushed the return address slot on the stack.
  137   // We push rbp twice in this sequence because we want the real rbp
  138   // to be under the return address like a normal enter, and we want to use pusha;
  139   // hence the extra, ignored copy of rbp saved by pusha.
 140   __ enter();
 141   __ pusha();
 142   __ pushf();
 143   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 144   __ push_FPU_state();          // Save FPU state & init
 145 
 146   if (verify_fpu) {
 147     // Some stubs may have non standard FPU control word settings so
  148     // only check and reset the value when it is required to be the
 149     // standard value.  The safepoint blob in particular can be used
 150     // in methods which are using the 24 bit control word for
 151     // optimized float math.
 152 
 153 #ifdef ASSERT
 154     // Make sure the control word has the expected value
 155     Label ok;
 156     __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 157     __ jccb(Assembler::equal, ok);
 158     __ stop("corrupted control word detected");
 159     __ bind(ok);
 160 #endif
 161 
 162     // Reset the control word to guard against exceptions being unmasked
 163     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 164     // into the on stack copy and then reload that to make sure that the
 165     // current and future values are correct.
 166     __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 167   }
 168 
 169   __ frstor(Address(rsp, 0));
 170   if (!verify_fpu) {
 171     // Set the control word so that exceptions are masked for the
 172     // following code.
 173     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
 174   }
 175 
 176   // Save the FPU registers in de-opt-able form
 177 
 178   __ fstp_d(Address(rsp, st0_off*wordSize)); // st(0)
 179   __ fstp_d(Address(rsp, st1_off*wordSize)); // st(1)
 180   __ fstp_d(Address(rsp, st2_off*wordSize)); // st(2)
 181   __ fstp_d(Address(rsp, st3_off*wordSize)); // st(3)
 182   __ fstp_d(Address(rsp, st4_off*wordSize)); // st(4)
 183   __ fstp_d(Address(rsp, st5_off*wordSize)); // st(5)
 184   __ fstp_d(Address(rsp, st6_off*wordSize)); // st(6)
 185   __ fstp_d(Address(rsp, st7_off*wordSize)); // st(7)
 186 
 187   if( UseSSE == 1 ) {           // Save the XMM state
 188     __ movflt(Address(rsp,xmm0_off*wordSize),xmm0);
 189     __ movflt(Address(rsp,xmm1_off*wordSize),xmm1);
 190     __ movflt(Address(rsp,xmm2_off*wordSize),xmm2);
 191     __ movflt(Address(rsp,xmm3_off*wordSize),xmm3);
 192     __ movflt(Address(rsp,xmm4_off*wordSize),xmm4);
 193     __ movflt(Address(rsp,xmm5_off*wordSize),xmm5);
 194     __ movflt(Address(rsp,xmm6_off*wordSize),xmm6);
 195     __ movflt(Address(rsp,xmm7_off*wordSize),xmm7);
 196   } else if( UseSSE >= 2 ) {
  197     // Save whole 128-bit (16 bytes) XMM registers
 198     __ movdqu(Address(rsp,xmm0_off*wordSize),xmm0);
 199     __ movdqu(Address(rsp,xmm1_off*wordSize),xmm1);
 200     __ movdqu(Address(rsp,xmm2_off*wordSize),xmm2);
 201     __ movdqu(Address(rsp,xmm3_off*wordSize),xmm3);
 202     __ movdqu(Address(rsp,xmm4_off*wordSize),xmm4);
 203     __ movdqu(Address(rsp,xmm5_off*wordSize),xmm5);
 204     __ movdqu(Address(rsp,xmm6_off*wordSize),xmm6);
 205     __ movdqu(Address(rsp,xmm7_off*wordSize),xmm7);
 206   }
 207 
 208   if (vect_words > 0) {
 209     assert(vect_words*wordSize == 128, "");
  210     __ subptr(rsp, 128); // Save upper half of YMM registers
 211     __ vextractf128h(Address(rsp,  0),xmm0);
 212     __ vextractf128h(Address(rsp, 16),xmm1);
 213     __ vextractf128h(Address(rsp, 32),xmm2);
 214     __ vextractf128h(Address(rsp, 48),xmm3);
 215     __ vextractf128h(Address(rsp, 64),xmm4);
 216     __ vextractf128h(Address(rsp, 80),xmm5);
 217     __ vextractf128h(Address(rsp, 96),xmm6);
 218     __ vextractf128h(Address(rsp,112),xmm7);
 219   }
 220 
 221   // Set an oopmap for the call site.  This oopmap will map all
 222   // oop-registers and debug-info registers as callee-saved.  This
 223   // will allow deoptimization at this safepoint to find all possible
 224   // debug-info recordings, as well as let GC find all oops.
 225 
 226   OopMapSet *oop_maps = new OopMapSet();
 227   OopMap* map =  new OopMap( frame_words, 0 );
 228 
 229 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 230 
 231   map->set_callee_saved(STACK_OFFSET( rax_off), rax->as_VMReg());
 232   map->set_callee_saved(STACK_OFFSET( rcx_off), rcx->as_VMReg());
 233   map->set_callee_saved(STACK_OFFSET( rdx_off), rdx->as_VMReg());
 234   map->set_callee_saved(STACK_OFFSET( rbx_off), rbx->as_VMReg());
  235   // rbp location is known implicitly; no oopMap
 236   map->set_callee_saved(STACK_OFFSET( rsi_off), rsi->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET( rdi_off), rdi->as_VMReg());
 238   map->set_callee_saved(STACK_OFFSET(st0_off), as_FloatRegister(0)->as_VMReg());
 239   map->set_callee_saved(STACK_OFFSET(st1_off), as_FloatRegister(1)->as_VMReg());
 240   map->set_callee_saved(STACK_OFFSET(st2_off), as_FloatRegister(2)->as_VMReg());
 241   map->set_callee_saved(STACK_OFFSET(st3_off), as_FloatRegister(3)->as_VMReg());
 242   map->set_callee_saved(STACK_OFFSET(st4_off), as_FloatRegister(4)->as_VMReg());
 243   map->set_callee_saved(STACK_OFFSET(st5_off), as_FloatRegister(5)->as_VMReg());
 244   map->set_callee_saved(STACK_OFFSET(st6_off), as_FloatRegister(6)->as_VMReg());
 245   map->set_callee_saved(STACK_OFFSET(st7_off), as_FloatRegister(7)->as_VMReg());
 246   map->set_callee_saved(STACK_OFFSET(xmm0_off), xmm0->as_VMReg());
 247   map->set_callee_saved(STACK_OFFSET(xmm1_off), xmm1->as_VMReg());
 248   map->set_callee_saved(STACK_OFFSET(xmm2_off), xmm2->as_VMReg());
 249   map->set_callee_saved(STACK_OFFSET(xmm3_off), xmm3->as_VMReg());
 250   map->set_callee_saved(STACK_OFFSET(xmm4_off), xmm4->as_VMReg());
 251   map->set_callee_saved(STACK_OFFSET(xmm5_off), xmm5->as_VMReg());
 252   map->set_callee_saved(STACK_OFFSET(xmm6_off), xmm6->as_VMReg());
 253   map->set_callee_saved(STACK_OFFSET(xmm7_off), xmm7->as_VMReg());
 254   // %%% This is really a waste but we'll keep things as they were for now
 255   if (true) {
 256 #define NEXTREG(x) (x)->as_VMReg()->next()
 257     map->set_callee_saved(STACK_OFFSET(st0H_off), NEXTREG(as_FloatRegister(0)));
 258     map->set_callee_saved(STACK_OFFSET(st1H_off), NEXTREG(as_FloatRegister(1)));
 259     map->set_callee_saved(STACK_OFFSET(st2H_off), NEXTREG(as_FloatRegister(2)));
 260     map->set_callee_saved(STACK_OFFSET(st3H_off), NEXTREG(as_FloatRegister(3)));
 261     map->set_callee_saved(STACK_OFFSET(st4H_off), NEXTREG(as_FloatRegister(4)));
 262     map->set_callee_saved(STACK_OFFSET(st5H_off), NEXTREG(as_FloatRegister(5)));
 263     map->set_callee_saved(STACK_OFFSET(st6H_off), NEXTREG(as_FloatRegister(6)));
 264     map->set_callee_saved(STACK_OFFSET(st7H_off), NEXTREG(as_FloatRegister(7)));
 265     map->set_callee_saved(STACK_OFFSET(xmm0H_off), NEXTREG(xmm0));
 266     map->set_callee_saved(STACK_OFFSET(xmm1H_off), NEXTREG(xmm1));
 267     map->set_callee_saved(STACK_OFFSET(xmm2H_off), NEXTREG(xmm2));
 268     map->set_callee_saved(STACK_OFFSET(xmm3H_off), NEXTREG(xmm3));
 269     map->set_callee_saved(STACK_OFFSET(xmm4H_off), NEXTREG(xmm4));
 270     map->set_callee_saved(STACK_OFFSET(xmm5H_off), NEXTREG(xmm5));
 271     map->set_callee_saved(STACK_OFFSET(xmm6H_off), NEXTREG(xmm6));
 272     map->set_callee_saved(STACK_OFFSET(xmm7H_off), NEXTREG(xmm7));
 273 #undef NEXTREG
 274 #undef STACK_OFFSET
 275   }
 276 
 277   return map;
 278 
 279 }
 280 
 281 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 282   // Recover XMM & FPU state
 283   int additional_frame_bytes = 0;
 284 #ifdef COMPILER2
 285   if (restore_vectors) {
 286     assert(UseAVX > 0, "256bit vectors are supported only with AVX");
 287     assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
 288     additional_frame_bytes = 128;
 289   }
 290 #else
 291   assert(!restore_vectors, "vectors are generated only by C2");
 292 #endif
 293   if (UseSSE == 1) {
 294     assert(additional_frame_bytes == 0, "");
 295     __ movflt(xmm0,Address(rsp,xmm0_off*wordSize));
 296     __ movflt(xmm1,Address(rsp,xmm1_off*wordSize));
 297     __ movflt(xmm2,Address(rsp,xmm2_off*wordSize));
 298     __ movflt(xmm3,Address(rsp,xmm3_off*wordSize));
 299     __ movflt(xmm4,Address(rsp,xmm4_off*wordSize));
 300     __ movflt(xmm5,Address(rsp,xmm5_off*wordSize));
 301     __ movflt(xmm6,Address(rsp,xmm6_off*wordSize));
 302     __ movflt(xmm7,Address(rsp,xmm7_off*wordSize));
 303   } else if (UseSSE >= 2) {
 304 #define STACK_ADDRESS(x) Address(rsp,(x)*wordSize + additional_frame_bytes)
 305     __ movdqu(xmm0,STACK_ADDRESS(xmm0_off));
 306     __ movdqu(xmm1,STACK_ADDRESS(xmm1_off));
 307     __ movdqu(xmm2,STACK_ADDRESS(xmm2_off));
 308     __ movdqu(xmm3,STACK_ADDRESS(xmm3_off));
 309     __ movdqu(xmm4,STACK_ADDRESS(xmm4_off));
 310     __ movdqu(xmm5,STACK_ADDRESS(xmm5_off));
 311     __ movdqu(xmm6,STACK_ADDRESS(xmm6_off));
 312     __ movdqu(xmm7,STACK_ADDRESS(xmm7_off));
 313 #undef STACK_ADDRESS
 314   }
 315   if (restore_vectors) {
  316     // Restore upper half of YMM registers.
 317     assert(additional_frame_bytes == 128, "");
 318     __ vinsertf128h(xmm0, Address(rsp,  0));
 319     __ vinsertf128h(xmm1, Address(rsp, 16));
 320     __ vinsertf128h(xmm2, Address(rsp, 32));
 321     __ vinsertf128h(xmm3, Address(rsp, 48));
 322     __ vinsertf128h(xmm4, Address(rsp, 64));
 323     __ vinsertf128h(xmm5, Address(rsp, 80));
 324     __ vinsertf128h(xmm6, Address(rsp, 96));
 325     __ vinsertf128h(xmm7, Address(rsp,112));
 326     __ addptr(rsp, additional_frame_bytes);
 327   }
 328   __ pop_FPU_state();
 329   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 330 
 331   __ popf();
 332   __ popa();
  333   // Get the rbp described implicitly by the frame sender code (no oopMap)
 334   __ pop(rbp);
 335 
 336 }
 337 
 338 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 339 
  340   // Just restore the result registers. Only used by deoptimization. By
  341   // now any callee save register that needs to be restored to a c2
 342   // caller of the deoptee has been extracted into the vframeArray
 343   // and will be stuffed into the c2i adapter we create for later
 344   // restoration so only result registers need to be restored here.
 345   //
 346 
 347   __ frstor(Address(rsp, 0));      // Restore fpu state
 348 
 349   // Recover XMM & FPU state
 350   if( UseSSE == 1 ) {
 351     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 352   } else if( UseSSE >= 2 ) {
 353     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 354   }
 355   __ movptr(rax, Address(rsp, rax_off*wordSize));
 356   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  357   // Pop all of the register save area off the stack except the return address
 358   __ addptr(rsp, return_off * wordSize);
 359 }
 360 
  361 // Is the vector's size (in bytes) bigger than the size saved by default?
  362 // 16-byte XMM registers are saved by default using SSE2 movdqu instructions.
  363 // Note: MaxVectorSize == 0 with UseSSE < 2, and vectors are not generated.
 364 bool SharedRuntime::is_wide_vector(int size) {
 365   return size > 16;
 366 }
 367 
 368 // The java_calling_convention describes stack locations as ideal slots on
 369 // a frame with no abi restrictions. Since we must observe abi restrictions
 370 // (like the placement of the register window) the slots must be biased by
 371 // the following value.
 372 static int reg2offset_in(VMReg r) {
  373   // Account for saved rbp and return address
 374   // This should really be in_preserve_stack_slots
 375   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 376 }
 377 
 378 static int reg2offset_out(VMReg r) {
 379   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 380 }
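
A small worked example of these helpers (illustrative, assuming
VMRegImpl::stack_slot_size == 4 on this 32-bit port):

//   reg2offset_in(slot 0)  == (0 + 2) * 4 ==  8   -> Address(rbp, 8), the first
//                             incoming arg, just past the saved rbp and the
//                             return address
//   reg2offset_in(slot 3)  == (3 + 2) * 4 == 20   -> Address(rbp, 20)
//   reg2offset_out(slot 0) == out_preserve_stack_slots() * 4 bytes above rsp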
 381 
 382 // ---------------------------------------------------------------------------
 383 // Read the array of BasicTypes from a signature, and compute where the
 384 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
  385 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
  386 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
  387 // as framesizes are fixed.
  388 // VMRegImpl::stack0 refers to the first slot 0(sp),
  389 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Registers
  390 // (up to RegisterImpl::number_of_registers) are the 32-bit
  391 // integer registers.
 392 
 393 // Pass first two oop/int args in registers ECX and EDX.
 394 // Pass first two float/double args in registers XMM0 and XMM1.
 395 // Doubles have precedence, so if you pass a mix of floats and doubles
 396 // the doubles will grab the registers before the floats will.
 397 
 398 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 399 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 400 // units regardless of build. Of course for i486 there is no 64 bit build
 401 
 402 
 403 // ---------------------------------------------------------------------------
 404 // The compiled Java calling convention.
 405 // Pass first two oop/int args in registers ECX and EDX.
 406 // Pass first two float/double args in registers XMM0 and XMM1.
 407 // Doubles have precedence, so if you pass a mix of floats and doubles
 408 // the doubles will grab the registers before the floats will.
 409 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 410                                            VMRegPair *regs,
 411                                            int total_args_passed,
 412                                            int is_outgoing) {
 413   uint    stack = 0;          // Starting stack position for args on stack
 414 
 415 
 416   // Pass first two oop/int args in registers ECX and EDX.
 417   uint reg_arg0 = 9999;
 418   uint reg_arg1 = 9999;
 419 
 420   // Pass first two float/double args in registers XMM0 and XMM1.
 421   // Doubles have precedence, so if you pass a mix of floats and doubles
 422   // the doubles will grab the registers before the floats will.
 423   // CNC - TURNED OFF FOR non-SSE.
 424   //       On Intel we have to round all doubles (and most floats) at
 425   //       call sites by storing to the stack in any case.
 426   // UseSSE=0 ==> Don't Use ==> 9999+0
 427   // UseSSE=1 ==> Floats only ==> 9999+1
 428   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 429   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 430   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 431   uint freg_arg0 = 9999+fargs;
 432   uint freg_arg1 = 9999+fargs;
 433 
 434   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 435   int i;
 436   for( i = 0; i < total_args_passed; i++) {
 437     if( sig_bt[i] == T_DOUBLE ) {
 438       // first 2 doubles go in registers
 439       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 440       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 441       else // Else double is passed low on the stack to be aligned.
 442         stack += 2;
 443     } else if( sig_bt[i] == T_LONG ) {
 444       stack += 2;
 445     }
 446   }
 447   int dstack = 0;             // Separate counter for placing doubles
 448 
 449   // Now pick where all else goes.
 450   for( i = 0; i < total_args_passed; i++) {
 451     // From the type and the argument number (count) compute the location
 452     switch( sig_bt[i] ) {
 453     case T_SHORT:
 454     case T_CHAR:
 455     case T_BYTE:
 456     case T_BOOLEAN:
 457     case T_INT:
 458     case T_ARRAY:
 459     case T_OBJECT:
 460     case T_ADDRESS:
 461       if( reg_arg0 == 9999 )  {
 462         reg_arg0 = i;
 463         regs[i].set1(rcx->as_VMReg());
 464       } else if( reg_arg1 == 9999 )  {
 465         reg_arg1 = i;
 466         regs[i].set1(rdx->as_VMReg());
 467       } else {
 468         regs[i].set1(VMRegImpl::stack2reg(stack++));
 469       }
 470       break;
 471     case T_FLOAT:
 472       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 473         freg_arg0 = i;
 474         regs[i].set1(xmm0->as_VMReg());
 475       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 476         freg_arg1 = i;
 477         regs[i].set1(xmm1->as_VMReg());
 478       } else {
 479         regs[i].set1(VMRegImpl::stack2reg(stack++));
 480       }
 481       break;
 482     case T_LONG:
 483       assert(sig_bt[i+1] == T_VOID, "missing Half" );
 484       regs[i].set2(VMRegImpl::stack2reg(dstack));
 485       dstack += 2;
 486       break;
 487     case T_DOUBLE:
 488       assert(sig_bt[i+1] == T_VOID, "missing Half" );
 489       if( freg_arg0 == (uint)i ) {
 490         regs[i].set2(xmm0->as_VMReg());
 491       } else if( freg_arg1 == (uint)i ) {
 492         regs[i].set2(xmm1->as_VMReg());
 493       } else {
 494         regs[i].set2(VMRegImpl::stack2reg(dstack));
 495         dstack += 2;
 496       }
 497       break;
 498     case T_VOID: regs[i].set_bad(); break;
  499
 500     default:
 501       ShouldNotReachHere();
 502       break;
 503     }
 504   }
 505 
  506   // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
 507   return round_to(stack, 2);
 508 }
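
As a concrete trace of the rules above, consider the hypothetical signature
(int, long, float, double, Object) with UseSSE >= 2 (this example is not part
of the original source):

//   sig_bt: T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID, T_OBJECT
//
//   Pass 1 (count stack slots for doubles and longs):
//     T_DOUBLE claims freg_arg0; T_LONG reserves 2 slots   -> stack == 2
//   Pass 2:
//     T_INT    -> ECX              (reg_arg0)
//     T_LONG   -> stack slots 0-1  (dstack; longs always go to the stack)
//     T_FLOAT  -> XMM1             (freg_arg1; the double already owns XMM0)
//     T_DOUBLE -> XMM0             (doubles have precedence)
//     T_OBJECT -> EDX              (reg_arg1)
//
//   Returns round_to(2, 2) == 2 outgoing stack slots.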
 509 
  510 // Patch the caller's callsite with entry to compiled code if it exists.
 511 static void patch_callers_callsite(MacroAssembler *masm) {
 512   Label L;
 513   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 514   __ jcc(Assembler::equal, L);
 515   // Schedule the branch target address early.
 516   // Call into the VM to patch the caller, then jump to compiled callee
  517   // rax isn't live, so capture the return address while we easily can
 518   __ movptr(rax, Address(rsp, 0));
 519   __ pusha();
 520   __ pushf();
 521 
 522   if (UseSSE == 1) {
 523     __ subptr(rsp, 2*wordSize);
 524     __ movflt(Address(rsp, 0), xmm0);
 525     __ movflt(Address(rsp, wordSize), xmm1);
 526   }
 527   if (UseSSE >= 2) {
 528     __ subptr(rsp, 4*wordSize);
 529     __ movdbl(Address(rsp, 0), xmm0);
 530     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 531   }
 532 #ifdef COMPILER2
 533   // C2 may leave the stack dirty if not in SSE2+ mode
 534   if (UseSSE >= 2) {
 535     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 536   } else {
 537     __ empty_FPU_stack();
 538   }
 539 #endif /* COMPILER2 */
 540 
 541   // VM needs caller's callsite
 542   __ push(rax);
 543   // VM needs target method
 544   __ push(rbx);
 545   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 546   __ addptr(rsp, 2*wordSize);
 547 
 548   if (UseSSE == 1) {
 549     __ movflt(xmm0, Address(rsp, 0));
 550     __ movflt(xmm1, Address(rsp, wordSize));
 551     __ addptr(rsp, 2*wordSize);
 552   }
 553   if (UseSSE >= 2) {
 554     __ movdbl(xmm0, Address(rsp, 0));
 555     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 556     __ addptr(rsp, 4*wordSize);
 557   }
 558 
 559   __ popf();
 560   __ popa();
 561   __ bind(L);
 562 }
 563 
 564 
 565 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 566   int next_off = st_off - Interpreter::stackElementSize;
 567   __ movdbl(Address(rsp, next_off), r);
 568 }
 569 
 570 static void gen_c2i_adapter(MacroAssembler *masm,
 571                             int total_args_passed,
 572                             int comp_args_on_stack,
 573                             const BasicType *sig_bt,
 574                             const VMRegPair *regs,
 575                             Label& skip_fixup) {
 576   // Before we get into the guts of the C2I adapter, see if we should be here
 577   // at all.  We've come from compiled code and are attempting to jump to the
 578   // interpreter, which means the caller made a static call to get here
 579   // (vcalls always get a compiled target if there is one).  Check for a
 580   // compiled target.  If there is one, we need to patch the caller's call.
 581   patch_callers_callsite(masm);
 582 
 583   __ bind(skip_fixup);
 584 
 585 #ifdef COMPILER2
 586   // C2 may leave the stack dirty if not in SSE2+ mode
 587   if (UseSSE >= 2) {
 588     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 589   } else {
 590     __ empty_FPU_stack();
 591   }
 592 #endif /* COMPILER2 */
 593 
  594   // Since all args are passed on the stack,
  595   // total_args_passed * Interpreter::stackElementSize
  596   // is the space we need.
 597   int extraspace = total_args_passed * Interpreter::stackElementSize;
 598 
 599   // Get return address
 600   __ pop(rax);
 601 
 602   // set senderSP value
 603   __ movptr(rsi, rsp);
 604 
 605   __ subptr(rsp, extraspace);
 606 
 607   // Now write the args into the outgoing interpreter space
 608   for (int i = 0; i < total_args_passed; i++) {
 609     if (sig_bt[i] == T_VOID) {
 610       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 611       continue;
 612     }
 613 
 614     // st_off points to lowest address on stack.
 615     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
 616     int next_off = st_off - Interpreter::stackElementSize;
 617 
 618     // Say 4 args:
 619     // i   st_off
 620     // 0   12 T_LONG
 621     // 1    8 T_VOID
 622     // 2    4 T_OBJECT
 623     // 3    0 T_BOOL
 624     VMReg r_1 = regs[i].first();
 625     VMReg r_2 = regs[i].second();
 626     if (!r_1->is_valid()) {
 627       assert(!r_2->is_valid(), "");
 628       continue;
 629     }
 630 
 631     if (r_1->is_stack()) {
  632       // memory to memory; copy via a temp register
 633       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 634 
 635       if (!r_2->is_valid()) {
 636         __ movl(rdi, Address(rsp, ld_off));
 637         __ movptr(Address(rsp, st_off), rdi);
 638       } else {
 639 
 640         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 641         // st_off == MSW, st_off-wordSize == LSW
 642 
 643         __ movptr(rdi, Address(rsp, ld_off));
 644         __ movptr(Address(rsp, next_off), rdi);
 645 #ifndef _LP64
 646         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 647         __ movptr(Address(rsp, st_off), rdi);
 648 #else
 649 #ifdef ASSERT
 650         // Overwrite the unused slot with known junk
 651         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 652         __ movptr(Address(rsp, st_off), rax);
 653 #endif /* ASSERT */
 654 #endif // _LP64
 655       }
 656     } else if (r_1->is_Register()) {
 657       Register r = r_1->as_Register();
 658       if (!r_2->is_valid()) {
 659         __ movl(Address(rsp, st_off), r);
 660       } else {
 661         // long/double in gpr
 662         NOT_LP64(ShouldNotReachHere());
 663         // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 664         // T_DOUBLE and T_LONG use two slots in the interpreter
 665         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 666           // long/double in gpr
 667 #ifdef ASSERT
 668           // Overwrite the unused slot with known junk
 669           LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
 670           __ movptr(Address(rsp, st_off), rax);
 671 #endif /* ASSERT */
 672           __ movptr(Address(rsp, next_off), r);
 673         } else {
 674           __ movptr(Address(rsp, st_off), r);
 675         }
 676       }
 677     } else {
 678       assert(r_1->is_XMMRegister(), "");
 679       if (!r_2->is_valid()) {
 680         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 681       } else {
 682         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
 683         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 684       }
 685     }
 686   }
 687 
 688   // Schedule the branch target address early.
 689   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 690   // And repush original return address
 691   __ push(rax);
 692   __ jmp(rcx);
 693 }
 694 
 695 
 696 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 697   int next_val_off = ld_off - Interpreter::stackElementSize;
 698   __ movdbl(r, Address(saved_sp, next_val_off));
 699 }
 700 
 701 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 702                         address code_start, address code_end,
 703                         Label& L_ok) {
 704   Label L_fail;
 705   __ lea(temp_reg, ExternalAddress(code_start));
 706   __ cmpptr(pc_reg, temp_reg);
 707   __ jcc(Assembler::belowEqual, L_fail);
 708   __ lea(temp_reg, ExternalAddress(code_end));
 709   __ cmpptr(pc_reg, temp_reg);
 710   __ jcc(Assembler::below, L_ok);
 711   __ bind(L_fail);
 712 }
 713 
 714 static void gen_i2c_adapter(MacroAssembler *masm,
 715                             int total_args_passed,
 716                             int comp_args_on_stack,
 717                             const BasicType *sig_bt,
 718                             const VMRegPair *regs) {
 719 
 720   // Note: rsi contains the senderSP on entry. We must preserve it since
  721   // we may do an i2c -> c2i transition if we lose a race where compiled
 722   // code goes non-entrant while we get args ready.
 723 
 724   // Adapters can be frameless because they do not require the caller
 725   // to perform additional cleanup work, such as correcting the stack pointer.
 726   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 727   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 728   // even if a callee has modified the stack pointer.
 729   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 730   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 731   // up via the senderSP register).
 732   // In other words, if *either* the caller or callee is interpreted, we can
 733   // get the stack pointer repaired after a call.
 734   // This is why c2i and i2c adapters cannot be indefinitely composed.
 735   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 736   // both caller and callee would be compiled methods, and neither would
 737   // clean up the stack pointer changes performed by the two adapters.
 738   // If this happens, control eventually transfers back to the compiled
 739   // caller, but with an uncorrected stack, causing delayed havoc.
 740 
 741   // Pick up the return address
 742   __ movptr(rax, Address(rsp, 0));
 743 
 744   if (VerifyAdapterCalls &&
 745       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 746     // So, let's test for cascading c2i/i2c adapters right now.
 747     //  assert(Interpreter::contains($return_addr) ||
 748     //         StubRoutines::contains($return_addr),
 749     //         "i2c adapter must return to an interpreter frame");
 750     __ block_comment("verify_i2c { ");
 751     Label L_ok;
 752     if (Interpreter::code() != NULL)
 753       range_check(masm, rax, rdi,
 754                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 755                   L_ok);
 756     if (StubRoutines::code1() != NULL)
 757       range_check(masm, rax, rdi,
 758                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 759                   L_ok);
 760     if (StubRoutines::code2() != NULL)
 761       range_check(masm, rax, rdi,
 762                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 763                   L_ok);
 764     const char* msg = "i2c adapter must return to an interpreter frame";
 765     __ block_comment(msg);
 766     __ stop(msg);
 767     __ bind(L_ok);
  768     __ block_comment("} verify_i2c ");
 769   }
 770 
 771   // Must preserve original SP for loading incoming arguments because
 772   // we need to align the outgoing SP for compiled code.
 773   __ movptr(rdi, rsp);
 774 
 775   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 776   // in registers, we will occasionally have no stack args.
 777   int comp_words_on_stack = 0;
 778   if (comp_args_on_stack) {
 779     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 780     // registers are below.  By subtracting stack0, we either get a negative
 781     // number (all values in registers) or the maximum stack slot accessed.
 782     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 783     // Convert 4-byte stack slots to words.
 784     comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
  785     // Round up to minimum stack alignment, in wordSize
 786     comp_words_on_stack = round_to(comp_words_on_stack, 2);
 787     __ subptr(rsp, comp_words_on_stack * wordSize);
 788   }
 789 
 790   // Align the outgoing SP
 791   __ andptr(rsp, -(StackAlignmentInBytes));
 792 
 793   // push the return address on the stack (note that pushing, rather
 794   // than storing it, yields the correct frame alignment for the callee)
 795   __ push(rax);
 796 
 797   // Put saved SP in another register
 798   const Register saved_sp = rax;
 799   __ movptr(saved_sp, rdi);
 800 
 801 
 802   // Will jump to the compiled code just as if compiled code was doing it.
 803   // Pre-load the register-jump target early, to schedule it better.
 804   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 805 
 806   // Now generate the shuffle code.  Pick up all register args and move the
 807   // rest through the floating point stack top.
 808   for (int i = 0; i < total_args_passed; i++) {
 809     if (sig_bt[i] == T_VOID) {
 810       // Longs and doubles are passed in native word order, but misaligned
 811       // in the 32-bit build.
 812       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 813       continue;
 814     }
 815 
 816     // Pick up 0, 1 or 2 words from SP+offset.
 817 
 818     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 819             "scrambled load targets?");
 820     // Load in argument order going down.
 821     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
 822     // Point to interpreter value (vs. tag)
 823     int next_off = ld_off - Interpreter::stackElementSize;
 824     //
 825     //
 826     //
 827     VMReg r_1 = regs[i].first();
 828     VMReg r_2 = regs[i].second();
 829     if (!r_1->is_valid()) {
 830       assert(!r_2->is_valid(), "");
 831       continue;
 832     }
 833     if (r_1->is_stack()) {
 834       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 835       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 836 
  837       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
  838       // and if we end up going thru a c2i because of a miss, a reasonable value of rsi
  839       // will be generated.
 840       if (!r_2->is_valid()) {
 841         // __ fld_s(Address(saved_sp, ld_off));
 842         // __ fstp_s(Address(rsp, st_off));
 843         __ movl(rsi, Address(saved_sp, ld_off));
 844         __ movptr(Address(rsp, st_off), rsi);
 845       } else {
  846         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
  847         // are accessed at negative offsets, so the LSW is at the LOW address.
 848 
 849         // ld_off is MSW so get LSW
 850         // st_off is LSW (i.e. reg.first())
 851         // __ fld_d(Address(saved_sp, next_off));
 852         // __ fstp_d(Address(rsp, st_off));
 853         //
  854         // We are using two VMRegs. This can be T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
  855         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
 856         // So we must adjust where to pick up the data to match the interpreter.
 857         //
  858         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
  859         // are accessed at negative offsets, so the LSW is at the LOW address.
 860 
 861         // ld_off is MSW so get LSW
 862         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 863                            next_off : ld_off;
 864         __ movptr(rsi, Address(saved_sp, offset));
 865         __ movptr(Address(rsp, st_off), rsi);
 866 #ifndef _LP64
 867         __ movptr(rsi, Address(saved_sp, ld_off));
 868         __ movptr(Address(rsp, st_off + wordSize), rsi);
 869 #endif // _LP64
 870       }
 871     } else if (r_1->is_Register()) {  // Register argument
 872       Register r = r_1->as_Register();
 873       assert(r != rax, "must be different");
 874       if (r_2->is_valid()) {
 875         //
  876         // We are using two VMRegs. This can be T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
  877         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
 878         // So we must adjust where to pick up the data to match the interpreter.
 879 
 880         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 881                            next_off : ld_off;
 882 
 883         // this can be a misaligned move
 884         __ movptr(r, Address(saved_sp, offset));
 885 #ifndef _LP64
 886         assert(r_2->as_Register() != rax, "need another temporary register");
 887         // Remember r_1 is low address (and LSB on x86)
 888         // So r_2 gets loaded from high address regardless of the platform
 889         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 890 #endif // _LP64
 891       } else {
 892         __ movl(r, Address(saved_sp, ld_off));
 893       }
 894     } else {
 895       assert(r_1->is_XMMRegister(), "");
 896       if (!r_2->is_valid()) {
 897         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 898       } else {
 899         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 900       }
 901     }
 902   }
 903 
 904   // 6243940 We might end up in handle_wrong_method if
 905   // the callee is deoptimized as we race thru here. If that
 906   // happens we don't want to take a safepoint because the
 907   // caller frame will look interpreted and arguments are now
 908   // "compiled" so it is much better to make this transition
 909   // invisible to the stack walking code. Unfortunately if
 910   // we try and find the callee by normal means a safepoint
  911   // is possible. So we stash the desired callee in the thread
  912   // and the VM will find it there should this case occur.
 913 
 914   __ get_thread(rax);
 915   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 916 
  917   // Move Method* to rax in case we end up in a c2i adapter.
  918   // The c2i adapters expect Method* in rax (c2) because c2's
  919   // resolve stubs return the result (the method) in rax.
  920   // I'd love to fix this.
 921   __ mov(rax, rbx);
 922 
 923   __ jmp(rdi);
 924 }
 925 
 926 // ---------------------------------------------------------------
 927 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 928                                                             int total_args_passed,
 929                                                             int comp_args_on_stack,
 930                                                             const BasicType *sig_bt,
 931                                                             const VMRegPair *regs,
 932                                                             AdapterFingerPrint* fingerprint) {
 933   address i2c_entry = __ pc();
 934 
 935   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 936 
 937   // -------------------------------------------------------------------------
  938   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
 939   // to the interpreter.  The args start out packed in the compiled layout.  They
 940   // need to be unpacked into the interpreter layout.  This will almost always
 941   // require some stack space.  We grow the current (compiled) stack, then repack
 942   // the args.  We  finally end in a jump to the generic interpreter entry point.
 943   // On exit from the interpreter, the interpreter will restore our SP (lest the
  944   // compiled code, which relies solely on SP and not EBP, get sick).
 945 
 946   address c2i_unverified_entry = __ pc();
 947   Label skip_fixup;
 948 
 949   Register holder = rax;
 950   Register receiver = rcx;
 951   Register temp = rbx;
 952 
 953   {
 954 
 955     Label missed;
 956     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 957     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 958     __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
 959     __ jcc(Assembler::notEqual, missed);
  960     // The method might have been compiled since the call site was patched to
  961     // interpreted; if that is the case, treat it as a miss so we can get
  962     // the call site corrected.
 963     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 964     __ jcc(Assembler::equal, skip_fixup);
 965 
 966     __ bind(missed);
 967     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 968   }
 969 
 970   address c2i_entry = __ pc();
 971 
 972   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 973 
 974   __ flush();
 975   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 976 }
 977 
 978 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 979                                          VMRegPair *regs,
 980                                          int total_args_passed) {
  981 // We return the number of VMRegImpl stack slots we need to reserve for all
  982 // the arguments NOT counting out_preserve_stack_slots.
 983 
 984   uint    stack = 0;        // All arguments on stack
 985 
 986   for( int i = 0; i < total_args_passed; i++) {
 987     // From the type and the argument number (count) compute the location
 988     switch( sig_bt[i] ) {
 989     case T_BOOLEAN:
 990     case T_CHAR:
 991     case T_FLOAT:
 992     case T_BYTE:
 993     case T_SHORT:
 994     case T_INT:
 995     case T_OBJECT:
 996     case T_ARRAY:
 997     case T_ADDRESS:
 998     case T_METADATA:
 999       regs[i].set1(VMRegImpl::stack2reg(stack++));
1000       break;
1001     case T_LONG:
1002     case T_DOUBLE: // The stack numbering is reversed from Java
1003       // Since C arguments do not get reversed, the ordering for
1004       // doubles on the stack must be opposite the Java convention
1005       assert(sig_bt[i+1] == T_VOID, "missing Half" );
1006       regs[i].set2(VMRegImpl::stack2reg(stack));
1007       stack += 2;
1008       break;
1009     case T_VOID: regs[i].set_bad(); break;
1010     default:
1011       ShouldNotReachHere();
1012       break;
1013     }
1014   }
1015   return stack;
1016 }
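
For example (illustrative): a native signature of (jint, jlong, jdouble),
i.e. sig_bt == { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }, lays out as

//   T_INT    -> stack slot 0
//   T_LONG   -> stack slots 1-2   (no alignment, unlike the Java convention)
//   T_DOUBLE -> stack slots 3-4
//   returned slot count: 5

since on 32-bit x86 every C argument is passed on the stack in declaration
order.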
1017 
 1018 // A simple move of an integer-like type
1019 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1020   if (src.first()->is_stack()) {
1021     if (dst.first()->is_stack()) {
1022       // stack to stack
1023       // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1024       // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1025       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1026       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1027     } else {
1028       // stack to reg
1029       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1030     }
1031   } else if (dst.first()->is_stack()) {
1032     // reg to stack
1033     // no need to sign extend on 64bit
1034     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1035   } else {
1036     if (dst.first() != src.first()) {
1037       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1038     }
1039   }
1040 }
1041 
1042 // An oop arg. Must pass a handle not the oop itself
1043 static void object_move(MacroAssembler* masm,
1044                         OopMap* map,
1045                         int oop_handle_offset,
1046                         int framesize_in_slots,
1047                         VMRegPair src,
1048                         VMRegPair dst,
1049                         bool is_receiver,
1050                         int* receiver_offset) {
1051 
1052   // Because of the calling conventions we know that src can be a
1053   // register or a stack location. dst can only be a stack location.
1054 
1055   assert(dst.first()->is_stack(), "must be stack");
1056   // must pass a handle. First figure out the location we use as a handle
1057 
1058   if (src.first()->is_stack()) {
1059     // Oop is already on the stack as an argument
1060     Register rHandle = rax;
1061     Label nil;
1062     __ xorptr(rHandle, rHandle);
1063     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1064     __ jcc(Assembler::equal, nil);
1065     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1066     __ bind(nil);
1067     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1068 
1069     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1070     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1071     if (is_receiver) {
1072       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1073     }
1074   } else {
 1075     // Oop is in a register; we must store it to the space we reserve
 1076     // on the stack for oop_handles
1077     const Register rOop = src.first()->as_Register();
1078     const Register rHandle = rax;
1079     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1080     int offset = oop_slot*VMRegImpl::stack_slot_size;
1081     Label skip;
1082     __ movptr(Address(rsp, offset), rOop);
1083     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1084     __ xorptr(rHandle, rHandle);
1085     __ cmpptr(rOop, (int32_t)NULL_WORD);
1086     __ jcc(Assembler::equal, skip);
1087     __ lea(rHandle, Address(rsp, offset));
1088     __ bind(skip);
1089     // Store the handle parameter
1090     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1091     if (is_receiver) {
1092       *receiver_offset = offset;
1093     }
1094   }
1095 }
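
In short, both paths above produce the same callee-visible contract:

// The callee receives either NULL (when the oop itself is NULL) or the
// address of a stack slot holding the oop; that slot is recorded in the
// OopMap, so the GC can update the oop behind the handle during the call.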
1096 
 1097 // A float arg may have to do a float-reg to int-reg conversion
1098 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1099   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1100 
1101   // Because of the calling convention we know that src is either a stack location
1102   // or an xmm register. dst can only be a stack location.
1103 
1104   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1105 
1106   if (src.first()->is_stack()) {
1107     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1108     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1109   } else {
1110     // reg to stack
1111     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1112   }
1113 }
1114 
1115 // A long move
1116 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1117 
1118   // The only legal possibility for a long_move VMRegPair is:
1119   // 1: two stack slots (possibly unaligned)
 1120   // as neither the Java nor the C calling convention will use registers
1121   // for longs.
1122 
1123   if (src.first()->is_stack() && dst.first()->is_stack()) {
1124     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1125     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1126     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1127     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1128     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1129   } else {
1130     ShouldNotReachHere();
1131   }
1132 }
1133 
1134 // A double move
1135 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1136 
 1137   // The only legal possibilities for a double_move VMRegPair are listed below;
 1138   // the painful thing here is that, as with long_move, the stack slots may be unaligned.
1139 
1140   // Because of the calling convention we know that src is either
1141   //   1: a single physical register (xmm registers only)
1142   //   2: two stack slots (possibly unaligned)
1143   // dst can only be a pair of stack slots.
1144 
1145   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1146 
1147   if (src.first()->is_stack()) {
1148     // source is all stack
1149     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1150     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1151     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1152     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1153   } else {
1154     // reg to stack
1155     // No worries about stack alignment
1156     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1157   }
1158 }
1159 
1160 
1161 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
 1162   // We always ignore the frame_slots arg and just use the space just below the frame pointer,
 1163   // which by this time is free to use.
1164   switch (ret_type) {
1165   case T_FLOAT:
1166     __ fstp_s(Address(rbp, -wordSize));
1167     break;
1168   case T_DOUBLE:
1169     __ fstp_d(Address(rbp, -2*wordSize));
1170     break;
1171   case T_VOID:  break;
1172   case T_LONG:
1173     __ movptr(Address(rbp, -wordSize), rax);
1174     NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
1175     break;
1176   default: {
1177     __ movptr(Address(rbp, -wordSize), rax);
1178     }
1179   }
1180 }
1181 
1182 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
 1183   // We always ignore the frame_slots arg and just use the space just below the frame pointer,
 1184   // which by this time is free to use.
1185   switch (ret_type) {
1186   case T_FLOAT:
1187     __ fld_s(Address(rbp, -wordSize));
1188     break;
1189   case T_DOUBLE:
1190     __ fld_d(Address(rbp, -2*wordSize));
1191     break;
1192   case T_LONG:
1193     __ movptr(rax, Address(rbp, -wordSize));
1194     NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
1195     break;
1196   case T_VOID:  break;
1197   default: {
1198     __ movptr(rax, Address(rbp, -wordSize));
1199     }
1200   }
1201 }
1202 
1203 
1204 static void save_or_restore_arguments(MacroAssembler* masm,
1205                                       const int stack_slots,
1206                                       const int total_in_args,
1207                                       const int arg_save_area,
1208                                       OopMap* map,
1209                                       VMRegPair* in_regs,
1210                                       BasicType* in_sig_bt) {
1211   // if map is non-NULL then the code should store the values,
1212   // otherwise it should load them.
1213   int handle_index = 0;
 1214   // Save down double words first
1215   for ( int i = 0; i < total_in_args; i++) {
1216     if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1217       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1218       int offset = slot * VMRegImpl::stack_slot_size;
1219       handle_index += 2;
1220       assert(handle_index <= stack_slots, "overflow");
1221       if (map != NULL) {
1222         __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1223       } else {
1224         __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1225       }
1226     }
1227     if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
1228       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1229       int offset = slot * VMRegImpl::stack_slot_size;
1230       handle_index += 2;
1231       assert(handle_index <= stack_slots, "overflow");
1232       if (map != NULL) {
1233         __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
1234         if (in_regs[i].second()->is_Register()) {
1235           __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
1236         }
1237       } else {
1238         __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
1239         if (in_regs[i].second()->is_Register()) {
1240           __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
1241         }
1242       }
1243     }
1244   }
1245   // Save or restore single word registers
1246   for ( int i = 0; i < total_in_args; i++) {
1247     if (in_regs[i].first()->is_Register()) {
1248       int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1249       int offset = slot * VMRegImpl::stack_slot_size;
1250       assert(handle_index <= stack_slots, "overflow");
1251       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
 1252         map->set_oop(VMRegImpl::stack2reg(slot));
1253       }
1254 
 1255       // Value is in an input register; we must flush it to the stack
1256       const Register reg = in_regs[i].first()->as_Register();
1257       switch (in_sig_bt[i]) {
1258         case T_ARRAY:
1259           if (map != NULL) {
1260             __ movptr(Address(rsp, offset), reg);
1261           } else {
1262             __ movptr(reg, Address(rsp, offset));
1263           }
1264           break;
1265         case T_BOOLEAN:
1266         case T_CHAR:
1267         case T_BYTE:
1268         case T_SHORT:
1269         case T_INT:
1270           if (map != NULL) {
1271             __ movl(Address(rsp, offset), reg);
1272           } else {
1273             __ movl(reg, Address(rsp, offset));
1274           }
1275           break;
1276         case T_OBJECT:
1277         default: ShouldNotReachHere();
1278       }
1279     } else if (in_regs[i].first()->is_XMMRegister()) {
1280       if (in_sig_bt[i] == T_FLOAT) {
1281         int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1282         int offset = slot * VMRegImpl::stack_slot_size;
1283         assert(handle_index <= stack_slots, "overflow");
1284         if (map != NULL) {
1285           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1286         } else {
1287           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1288         }
1289       }
1290     } else if (in_regs[i].first()->is_stack()) {
1291       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1292         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1293         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1294       }
1295     }
1296   }
1297 }
1298 
1299 // Check GC_locker::needs_gc and enter the runtime if it's true.  This
1300 // keeps a new JNI critical region from starting until a GC has been
1301 // forced.  Save down any oops in registers and describe them in an
1302 // OopMap.
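     // In outline, the code emitted below behaves roughly like this sketch
     // (pseudocode, not the generated assembly):
     //   if (GC_locker::needs_gc()) {
     //     save_arguments();                               // plus an OopMap for any oops
     //     SharedRuntime::block_for_jni_critical(thread);
     //     restore_arguments();
     //   }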
1303 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1304                                                Register thread,
1305                                                int stack_slots,
1306                                                int total_c_args,
1307                                                int total_in_args,
1308                                                int arg_save_area,
1309                                                OopMapSet* oop_maps,
1310                                                VMRegPair* in_regs,
1311                                                BasicType* in_sig_bt) {
1312   __ block_comment("check GC_locker::needs_gc");
1313   Label cont;
1314   __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
1315   __ jcc(Assembler::equal, cont);
1316 
1317   // Save down any incoming oops and call into the runtime to halt for a GC
1318 
1319   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1320 
1321   save_or_restore_arguments(masm, stack_slots, total_in_args,
1322                             arg_save_area, map, in_regs, in_sig_bt);
1323 
1324   address the_pc = __ pc();
1325   oop_maps->add_gc_map( __ offset(), map);
1326   __ set_last_Java_frame(thread, rsp, noreg, the_pc);
1327 
1328   __ block_comment("block_for_jni_critical");
1329   __ push(thread);
1330   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1331   __ increment(rsp, wordSize);
1332 
1333   __ get_thread(thread);
1334   __ reset_last_Java_frame(thread, false, true);
1335 
1336   save_or_restore_arguments(masm, stack_slots, total_in_args,
1337                             arg_save_area, NULL, in_regs, in_sig_bt);
1338 
1339   __ bind(cont);
1340 #ifdef ASSERT
1341   if (StressCriticalJNINatives) {
1342     // Stress register saving
1343     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1344     save_or_restore_arguments(masm, stack_slots, total_in_args,
1345                               arg_save_area, map, in_regs, in_sig_bt);
1346     // Destroy argument registers
1347     for (int i = 0; i < total_in_args - 1; i++) {
1348       if (in_regs[i].first()->is_Register()) {
1349         const Register reg = in_regs[i].first()->as_Register();
1350         __ xorptr(reg, reg);
1351       } else if (in_regs[i].first()->is_XMMRegister()) {
1352         __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1353       } else if (in_regs[i].first()->is_FloatRegister()) {
1354         ShouldNotReachHere();
1355       } else if (in_regs[i].first()->is_stack()) {
1356         // Nothing to do
1357       } else {
1358         ShouldNotReachHere();
1359       }
1360       if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1361         i++;
1362       }
1363     }
1364 
1365     save_or_restore_arguments(masm, stack_slots, total_in_args,
1366                               arg_save_area, NULL, in_regs, in_sig_bt);
1367   }
1368 #endif
1369 }
1370 
1371 // Unpack an array argument into a pointer to the body and the length
1372 // if the array is non-null, otherwise pass 0 for both.
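     // At the C level the effect is roughly this sketch, where 'arr' stands
     // for the incoming array oop:
     //   body   = (arr == NULL) ? NULL : (char*)arr + base_offset(elem_type);
     //   length = (arr == NULL) ? 0    : arr->length;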
1373 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1374   Register tmp_reg = rax;
1375   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1376          "possible collision");
1377   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1378          "possible collision");
1379 
1380   // Pass the length, ptr pair
1381   Label is_null, done;
1382   VMRegPair tmp(tmp_reg->as_VMReg());
1383   if (reg.first()->is_stack()) {
1384     // Load the arg up from the stack
1385     simple_move32(masm, reg, tmp);
1386     reg = tmp;
1387   }
1388   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1389   __ jccb(Assembler::equal, is_null);
1390   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1391   simple_move32(masm, tmp, body_arg);
1392   // load the length relative to the body.
1393   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1394                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1395   simple_move32(masm, tmp, length_arg);
1396   __ jmpb(done);
1397   __ bind(is_null);
1398   // Pass zeros
1399   __ xorptr(tmp_reg, tmp_reg);
1400   simple_move32(masm, tmp, body_arg);
1401   simple_move32(masm, tmp, length_arg);
1402   __ bind(done);
1403 }
1404 
1405 static void verify_oop_args(MacroAssembler* masm,
1406                             methodHandle method,
1407                             const BasicType* sig_bt,
1408                             const VMRegPair* regs) {
1409   Register temp_reg = rbx;  // not part of any compiled calling seq
1410   if (VerifyOops) {
1411     for (int i = 0; i < method->size_of_parameters(); i++) {
1412       if (sig_bt[i] == T_OBJECT ||
1413           sig_bt[i] == T_ARRAY) {
1414         VMReg r = regs[i].first();
1415         assert(r->is_valid(), "bad oop arg");
1416         if (r->is_stack()) {
1417           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1418           __ verify_oop(temp_reg);
1419         } else {
1420           __ verify_oop(r->as_Register());
1421         }
1422       }
1423     }
1424   }
1425 }
1426 
1427 static void gen_special_dispatch(MacroAssembler* masm,
1428                                  methodHandle method,
1429                                  const BasicType* sig_bt,
1430                                  const VMRegPair* regs) {
1431   verify_oop_args(masm, method, sig_bt, regs);
1432   vmIntrinsics::ID iid = method->intrinsic_id();
1433 
1434   // Now write the args into the outgoing interpreter space
1435   bool     has_receiver   = false;
1436   Register receiver_reg   = noreg;
1437   int      member_arg_pos = -1;
1438   Register member_reg     = noreg;
1439   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1440   if (ref_kind != 0) {
1441     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1442     member_reg = rbx;  // known to be free at this point
1443     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1444   } else if (iid == vmIntrinsics::_invokeBasic) {
1445     has_receiver = true;
1446   } else {
1447     fatal(err_msg_res("unexpected intrinsic id %d", iid));
1448   }
1449 
1450   if (member_reg != noreg) {
1451     // Load the member_arg into register, if necessary.
1452     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1453     VMReg r = regs[member_arg_pos].first();
1454     if (r->is_stack()) {
1455       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1456     } else {
1457       // no data motion is needed
1458       member_reg = r->as_Register();
1459     }
1460   }
1461 
1462   if (has_receiver) {
1463     // Make sure the receiver is loaded into a register.
1464     assert(method->size_of_parameters() > 0, "oob");
1465     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1466     VMReg r = regs[0].first();
1467     assert(r->is_valid(), "bad receiver arg");
1468     if (r->is_stack()) {
1469       // Porting note:  This assumes that compiled calling conventions always
1470       // pass the receiver oop in a register.  If this is not true on some
1471       // platform, pick a temp and load the receiver from stack.
1472       fatal("receiver always in a register");
1473       receiver_reg = rcx;  // known to be free at this point
1474       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1475     } else {
1476       // no data motion is needed
1477       receiver_reg = r->as_Register();
1478     }
1479   }
1480 
1481   // Figure out which address we are really jumping to:
1482   MethodHandles::generate_method_handle_dispatch(masm, iid,
1483                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1484 }
1485 
1486 // ---------------------------------------------------------------------------
1487 // Generate a native wrapper for a given method.  The method takes arguments
1488 // in the Java compiled code convention, marshals them to the native
1489 // convention (handlizes oops, etc), transitions to native, makes the call,
1490 // returns to java state (possibly blocking), unhandlizes any result and
1491 // returns.
1492 //
1493 // Critical native functions are a shorthand for the use of
1494 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1495 // functions.  The wrapper is expected to unpack the arguments before
1496 // passing them to the callee and perform checks before and after the
1497 // native call to ensure that the GC_locker
1498 // lock_critical/unlock_critical semantics are followed.  Some other
1499 // parts of JNI setup are skipped, like the tear down of the JNI handle
1500 // block and the check for pending exceptions, since it's impossible for
1501 // them to be thrown.
1502 //
1503 // They are roughly structured like this:
1504 //    if (GC_locker::needs_gc())
1505 //      SharedRuntime::block_for_jni_critical();
1506 //    transition to thread_in_native
1507 //    unpack array arguments and call native entry point
1508 //    check for safepoint in progress
1509 //    check if any thread suspend flags are set
1510 //      call into JVM and possibly unlock the JNI critical
1511 //      if a GC was suppressed while in the critical native.
1512 //    transition back to thread_in_Java
1513 //    return to caller
1514 //
1515 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1516                                                 methodHandle method,
1517                                                 int compile_id,
1518                                                 BasicType* in_sig_bt,
1519                                                 VMRegPair* in_regs,
1520                                                 BasicType ret_type) {
1521   if (method->is_method_handle_intrinsic()) {
1522     vmIntrinsics::ID iid = method->intrinsic_id();
1523     intptr_t start = (intptr_t)__ pc();
1524     int vep_offset = ((intptr_t)__ pc()) - start;
1525     gen_special_dispatch(masm,
1526                          method,
1527                          in_sig_bt,
1528                          in_regs);
1529     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1530     __ flush();
1531     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1532     return nmethod::new_native_nmethod(method,
1533                                        compile_id,
1534                                        masm->code(),
1535                                        vep_offset,
1536                                        frame_complete,
1537                                        stack_slots / VMRegImpl::slots_per_word,
1538                                        in_ByteSize(-1),
1539                                        in_ByteSize(-1),
1540                                        (OopMapSet*)NULL);
1541   }
1542   bool is_critical_native = true;
1543   address native_func = method->critical_native_function();
1544   if (native_func == NULL) {
1545     native_func = method->native_function();
1546     is_critical_native = false;
1547   }
1548   assert(native_func != NULL, "must have function");
1549 
1550   // An OopMap for lock (and class if static)
1551   OopMapSet *oop_maps = new OopMapSet();
1552 
1553   // We have received a description of where all the java args are located
1554   // on entry to the wrapper. We need to convert these args to where
1555   // the jni function will expect them. To figure out where they go
1556   // we convert the java signature to a C signature by inserting
1557   // the hidden arguments as arg[0] and possibly arg[1] (static method)
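       // For example, a hypothetical 'static native int m(int, Object)' with
       // java signature (int, Object) becomes the C signature
       // (JNIEnv*, jclass, jint, jobject).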
1558 
1559   const int total_in_args = method->size_of_parameters();
1560   int total_c_args = total_in_args;
1561   if (!is_critical_native) {
1562     total_c_args += 1;
1563     if (method->is_static()) {
1564       total_c_args++;
1565     }
1566   } else {
1567     for (int i = 0; i < total_in_args; i++) {
1568       if (in_sig_bt[i] == T_ARRAY) {
1569         total_c_args++;
1570       }
1571     }
1572   }
1573 
1574   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1575   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1576   BasicType* in_elem_bt = NULL;
1577 
1578   int argc = 0;
1579   if (!is_critical_native) {
1580     out_sig_bt[argc++] = T_ADDRESS;
1581     if (method->is_static()) {
1582       out_sig_bt[argc++] = T_OBJECT;
1583     }
1584 
1585     for (int i = 0; i < total_in_args ; i++ ) {
1586       out_sig_bt[argc++] = in_sig_bt[i];
1587     }
1588   } else {
1589     Thread* THREAD = Thread::current();
1590     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1591     SignatureStream ss(method->signature());
1592     for (int i = 0; i < total_in_args ; i++ ) {
1593       if (in_sig_bt[i] == T_ARRAY) {
1594         // Arrays are passed as int, elem* pair
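             // (e.g. a byte[] argument expands to a jint length slot plus a
             //  jbyte* body pointer in the C signature)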
1595         out_sig_bt[argc++] = T_INT;
1596         out_sig_bt[argc++] = T_ADDRESS;
1597         Symbol* atype = ss.as_symbol(CHECK_NULL);
1598         const char* at = atype->as_C_string();
1599         if (strlen(at) == 2) {
1600           assert(at[0] == '[', "must be");
1601           switch (at[1]) {
1602             case 'B': in_elem_bt[i]  = T_BYTE; break;
1603             case 'C': in_elem_bt[i]  = T_CHAR; break;
1604             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1605             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1606             case 'I': in_elem_bt[i]  = T_INT; break;
1607             case 'J': in_elem_bt[i]  = T_LONG; break;
1608             case 'S': in_elem_bt[i]  = T_SHORT; break;
1609             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1610             default: ShouldNotReachHere();
1611           }
1612         }
1613       } else {
1614         out_sig_bt[argc++] = in_sig_bt[i];
1615         in_elem_bt[i] = T_VOID;
1616       }
1617       if (in_sig_bt[i] != T_VOID) {
1618         assert(in_sig_bt[i] == ss.type(), "must match");
1619         ss.next();
1620       }
1621     }
1622   }
1623 
1624   // Now figure out where the args must be stored and how much stack space
1625   // they require.
1626   int out_arg_slots;
1627   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1628 
1629   // Compute framesize for the wrapper.  We need to handlize all oops in
1630   // registers, a max of 2 on x86.
1631 
1632   // Calculate the total number of stack slots we will need.
1633 
1634   // First count the abi requirement plus all of the outgoing args
1635   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1636 
1637   // Now the space for the inbound oop handle area
1638   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1639   if (is_critical_native) {
1640     // Critical natives may have to call out so they need a save area
1641     // for register arguments.
1642     int double_slots = 0;
1643     int single_slots = 0;
1644     for ( int i = 0; i < total_in_args; i++) {
1645       if (in_regs[i].first()->is_Register()) {
1646         const Register reg = in_regs[i].first()->as_Register();
1647         switch (in_sig_bt[i]) {
1648           case T_ARRAY:  // critical array (uses 2 slots on LP64)
1649           case T_BOOLEAN:
1650           case T_BYTE:
1651           case T_SHORT:
1652           case T_CHAR:
1653           case T_INT:  single_slots++; break;
1654           case T_LONG: double_slots++; break;
1655           default:  ShouldNotReachHere();
1656         }
1657       } else if (in_regs[i].first()->is_XMMRegister()) {
1658         switch (in_sig_bt[i]) {
1659           case T_FLOAT:  single_slots++; break;
1660           case T_DOUBLE: double_slots++; break;
1661           default:  ShouldNotReachHere();
1662         }
1663       } else if (in_regs[i].first()->is_FloatRegister()) {
1664         ShouldNotReachHere();
1665       }
1666     }
1667     total_save_slots = double_slots * 2 + single_slots;
1668     // align the save area
1669     if (double_slots != 0) {
1670       stack_slots = round_to(stack_slots, 2);
1671     }
1672   }
1673 
1674   int oop_handle_offset = stack_slots;
1675   stack_slots += total_save_slots;
1676 
1677   // Now any space we need for handlizing a klass if static method
1678 
1679   int klass_slot_offset = 0;
1680   int klass_offset = -1;
1681   int lock_slot_offset = 0;
1682   bool is_static = false;
1683 
1684   if (method->is_static()) {
1685     klass_slot_offset = stack_slots;
1686     stack_slots += VMRegImpl::slots_per_word;
1687     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1688     is_static = true;
1689   }
1690 
1691   // Plus a lock if needed
1692 
1693   if (method->is_synchronized()) {
1694     lock_slot_offset = stack_slots;
1695     stack_slots += VMRegImpl::slots_per_word;
1696   }
1697 
1698   // Now a place (+2) to save return values or temps during shuffling
1699   // + 2 for return address (which we own) and saved rbp
1700   stack_slots += 4;
1701 
1702   // OK, the space we have allocated will look like:
1703   //
1704   //
1705   // FP-> |                     |
1706   //      |---------------------|
1707   //      | 2 slots for moves   |
1708   //      |---------------------|
1709   //      | lock box (if sync)  |
1710   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1711   //      | klass (if static)   |
1712   //      |---------------------| <- klass_slot_offset
1713   //      | oopHandle area      |
1714   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1715   //      | outbound memory     |
1716   //      | based arguments     |
1717   //      |                     |
1718   //      |---------------------|
1719   //      |                     |
1720   // SP-> | out_preserved_slots |
1721   //
1722   //
1723   // ****************************************************************************
1724   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1725   // arguments off of the stack after the jni call. Before the call we can use
1726   // instructions that are SP relative. After the jni call we switch to FP
1727   // relative instructions instead of re-adjusting the stack on windows.
1728   // ****************************************************************************
1729 
1730 
1731   // Now compute the actual number of stack words we need, rounding to make
1732   // the stack properly aligned.
1733   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1734 
1735   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1736 
1737   intptr_t start = (intptr_t)__ pc();
1738 
1739   // First thing make an ic check to see if we should even be here
1740 
1741   // We are free to use all registers as temps without saving them and
1742   // restoring them except rbp. rbp is the only callee save register
1743   // as far as the interpreter and the compiler(s) are concerned.
1744 
1745 
1746   const Register ic_reg = rax;
1747   const Register receiver = rcx;
1748   Label hit;
1749   Label exception_pending;
1750 
1751   __ verify_oop(receiver);
1752   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1753   __ jcc(Assembler::equal, hit);
1754 
1755   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1756 
1757   // The verified entry must be aligned for code patching,
1758   // and the first 5 bytes must be in the same cache line;
1759   // if we align at 8 then we can be sure the 5 bytes are in the same line.
1760   __ align(8);
1761 
1762   __ bind(hit);
1763 
1764   int vep_offset = ((intptr_t)__ pc()) - start;
1765 
1766 #ifdef COMPILER1
1767   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1768     // Object.hashCode can pull the hashCode from the header word
1769     // instead of doing a full VM transition once it's been computed.
1770     // Since hashCode is usually polymorphic at call sites we can't do
1771     // this optimization at the call site without a lot of work.
1772     Label slowCase;
1773     Register receiver = rcx;
1774     Register result = rax;
1775     __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
1776 
1777     // check if locked
1778     __ testptr(result, markOopDesc::unlocked_value);
1779     __ jcc (Assembler::zero, slowCase);
1780 
1781     if (UseBiasedLocking) {
1782       // Check if biased and fall through to runtime if so
1783       __ testptr(result, markOopDesc::biased_lock_bit_in_place);
1784       __ jcc (Assembler::notZero, slowCase);
1785     }
1786 
1787     // get hash
1788     __ andptr(result, markOopDesc::hash_mask_in_place);
1789     // test if hashCode exists
1790     __ jcc  (Assembler::zero, slowCase);
1791     __ shrptr(result, markOopDesc::hash_shift);
1792     __ ret(0);
1793     __ bind (slowCase);
1794   }
1795 #endif // COMPILER1
1796 
1797   // The instruction at the verified entry point must be 5 bytes or longer
1798   // because it can be patched on the fly by make_non_entrant. The stack bang
1799   // instruction fits that requirement.
1800 
1801   // Generate stack overflow check
1802 
1803   if (UseStackBanging) {
1804     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1805   } else {
1806     // need a 5 byte instruction to allow MT safe patching to non-entrant
1807     __ fat_nop();
1808   }
1809 
1810   // Generate a new frame for the wrapper.
1811   __ enter();
1812   // -2 because return address is already present and so is saved rbp
1813   __ subptr(rsp, stack_size - 2*wordSize);
1814 
1815   // Frame is now completed as far as size and linkage.
1816   int frame_complete = ((intptr_t)__ pc()) - start;
1817 
1818   if (UseRTMLocking) {
1819     // Abort RTM transaction before calling JNI
1820     // because critical section will be large and will be
1821     // aborted anyway. Also nmethod could be deoptimized.
1822     __ xabort(0);
1823   }
1824 
1825   // Calculate the difference between rsp and rbp. We need to know it
1826   // after the native call because on windows Java Natives will pop
1827   // the arguments and it is painful to do rsp relative addressing
1828   // in a platform independent way. So after the call we switch to
1829   // rbp relative addressing.
1830 
1831   int fp_adjustment = stack_size - 2*wordSize;
1832 
1833 #ifdef COMPILER2
1834   // C2 may leave the stack dirty if not in SSE2+ mode
1835   if (UseSSE >= 2) {
1836     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1837   } else {
1838     __ empty_FPU_stack();
1839   }
1840 #endif /* COMPILER2 */
1841 
1842   // Compute the rbp offset for any slots used after the jni call
1843 
1844   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1845 
1846   // We use rdi as a thread pointer because it is callee save and
1847   // if we load it once it is usable through the entire wrapper
1848   const Register thread = rdi;
1849 
1850   // We use rsi as the oop handle for the receiver/klass
1851   // It is callee save so it survives the call to native
1852 
1853   const Register oop_handle_reg = rsi;
1854 
1855   __ get_thread(thread);
1856 
1857   if (is_critical_native) {
1858     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1859                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1860   }
1861 
1862   //
1863   // We immediately shuffle the arguments so that any vm call we have to
1864   // make from here on out (sync slow path, jvmti, etc.) we will have
1865   // captured the oops from our caller and have a valid oopMap for
1866   // them.
1867 
1868   // -----------------
1869   // The Grand Shuffle
1870   //
1871   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1872   // and, if static, the class mirror instead of a receiver.  This pretty much
1873   // guarantees that register layout will not match (and x86 doesn't use reg
1874   // parms though amd does).  Since the native abi doesn't use register args
1875   // and the java convention does, we don't have to worry about collisions.
1876   // All of our moves are reg->stack or stack->stack.
1877   // We ignore the extra arguments during the shuffle and handle them at the
1878   // last moment. The shuffle is described by the two calling convention
1879   // vectors we have in our possession. We simply walk the java vector to
1880   // get the source locations and the c vector to get the destinations.
1881 
1882   int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
1883 
1884   // Record rsp-based slot for receiver on stack for non-static methods
1885   int receiver_offset = -1;
1886 
1887   // This is a trick. We double the stack slots so we can claim
1888   // the oops in the caller's frame. Since we are sure to have
1889   // more args than the caller, doubling is enough to make
1890   // sure we can capture all the incoming oop args from the
1891   // caller.
1892   //
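       // (Doubling lets stack2reg indices reach caller-frame slots; see the
       //  T_ARRAY stack case in save_or_restore_arguments, which records
       //  offset_in_older_frame + stack_slots in the map.)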
1893   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1894 
1895   // Mark location of rbp,
1896   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1897 
1898   // We know that we only have args in at most two integer registers (rcx, rdx), so rax and rbx
1899   // are free as temporaries if we have to do stack to stack moves.
1900   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1901 
1902   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1903     switch (in_sig_bt[i]) {
1904       case T_ARRAY:
1905         if (is_critical_native) {
1906           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1907           c_arg++;
1908           break;
1909         }
1910       case T_OBJECT:
1911         assert(!is_critical_native, "no oop arguments");
1912         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1913                     ((i == 0) && (!is_static)),
1914                     &receiver_offset);
1915         break;
1916       case T_VOID:
1917         break;
1918 
1919       case T_FLOAT:
1920         float_move(masm, in_regs[i], out_regs[c_arg]);
1921         break;
1922 
1923       case T_DOUBLE:
1924         assert( i + 1 < total_in_args &&
1925                 in_sig_bt[i + 1] == T_VOID &&
1926                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1927         double_move(masm, in_regs[i], out_regs[c_arg]);
1928         break;
1929 
1930       case T_LONG :
1931         long_move(masm, in_regs[i], out_regs[c_arg]);
1932         break;
1933 
1934       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1935 
1936       default:
1937         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1938     }
1939   }
1940 
1941   // Pre-load a static method's oop into rsi.  Used both by locking code and
1942   // the normal JNI call code.
1943   if (method->is_static() && !is_critical_native) {
1944 
1945     //  load oop into a register
1946     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1947 
1948     // Now handlize the static class mirror; it's known not-null.
1949     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1950     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1951 
1952     // Now get the handle
1953     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1954     // store the klass handle as second argument
1955     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1956   }
1957 
1958   // Change state to native (we save the return address in the thread, since it might not
1959   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1960   // points into the right code segment. It does not have to be the correct return pc.
1961   // We use the same pc/oopMap repeatedly when we call out
1962 
1963   intptr_t the_pc = (intptr_t) __ pc();
1964   oop_maps->add_gc_map(the_pc - start, map);
1965 
1966   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1967 
1968 
1969   // We have all of the arguments set up at this point. We must not touch any
1970   // argument registers now; there would be no oopMap describing any oops they contain.
1971 
1972   {
1973     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1974     __ mov_metadata(rax, method());
1975     __ call_VM_leaf(
1976          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1977          thread, rax);
1978   }
1979 
1980   // RedefineClasses() tracing support for obsolete method entry
1981   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1982     __ mov_metadata(rax, method());
1983     __ call_VM_leaf(
1984          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1985          thread, rax);
1986   }
1987 
1988   // These are register definitions we need for locking/unlocking
1989   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1990   const Register obj_reg  = rcx;  // Will contain the oop
1991   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1992 
1993   Label slow_path_lock;
1994   Label lock_done;
1995 
1996   // Lock a synchronized method
1997   if (method->is_synchronized()) {
1998     assert(!is_critical_native, "unhandled");
1999 
2000 
2001     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2002 
2003     // Get the handle (the 2nd argument)
2004     __ movptr(oop_handle_reg, Address(rsp, wordSize));
2005 
2006     // Get address of the box
2007 
2008     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
2009 
2010     // Load the oop from the handle
2011     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2012 
2013     if (UseBiasedLocking) {
2014       // Note that oop_handle_reg is trashed during this call
2015       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
2016     }
2017 
2018     // Load immediate 1 into swap_reg %rax
2019     __ movptr(swap_reg, 1);
2020
2021     // Load (object->mark() | 1) into swap_reg %rax
2022     __ orptr(swap_reg, Address(obj_reg, 0));
2023 
2024     // Save (object->mark() | 1) into BasicLock's displaced header
2025     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2026 
2027     if (os::is_MP()) {
2028       __ lock();
2029     }
2030 
2031     // src -> dest iff dest == rax, else rax <- dest
2032     // *obj_reg = lock_reg iff *obj_reg == rax, else rax = *(obj_reg)
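         // In outline, the fast path attempted here is (a sketch):
         //   if (CAS(&obj->mark, /*expected*/ mark|1, /*new*/ box) succeeds) goto lock_done;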
2033     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2034     __ jcc(Assembler::equal, lock_done);
2035 
2036     // Test if the oopMark is an obvious stack pointer, i.e.,
2037     //  1) (mark & 3) == 0, and
2038     //  2) rsp <= mark < rsp + os::vm_page_size()
2039     // These 3 tests can be done by evaluating the following
2040     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2041     // assuming both the stack pointer and page size have their
2042     // least significant 2 bits clear.
2043     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
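         // (e.g. with a 4K page, 3 - os::vm_page_size() == 0xfffff003; anding
         //  with this mask yields zero exactly when 0 <= mark - rsp < 4096
         //  and the low two bits of the difference are clear)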
2044 
2045     __ subptr(swap_reg, rsp);
2046     __ andptr(swap_reg, 3 - os::vm_page_size());
2047 
2048     // Save the test result; for the recursive case, the result is zero
2049     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2050     __ jcc(Assembler::notEqual, slow_path_lock);
2051     // Slow path will re-enter here
2052     __ bind(lock_done);
2053 
2054     if (UseBiasedLocking) {
2055       // Re-fetch oop_handle_reg as we trashed it above
2056       __ movptr(oop_handle_reg, Address(rsp, wordSize));
2057     }
2058   }
2059 
2060 
2061   // Finally just about ready to make the JNI call
2062 
2063 
2064   // get JNIEnv* which is first argument to native
2065   if (!is_critical_native) {
2066     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
2067     __ movptr(Address(rsp, 0), rdx);
2068   }
2069 
2070   // Now set thread in native
2071   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
2072 
2073   __ call(RuntimeAddress(native_func));
2074 
2075   // Verify or restore cpu control state after JNI call
2076   __ restore_cpu_control_state_after_jni();
2077 
2078   // WARNING - on Windows Java Natives use pascal calling convention and pop the
2079   // arguments off of the stack. We could just re-adjust the stack pointer here
2080   // and continue to do SP relative addressing but we instead switch to FP
2081   // relative addressing.
2082 
2083   // Unpack native results.
2084   switch (ret_type) {
2085   case T_BOOLEAN: __ c2bool(rax);            break;
2086   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
2087   case T_BYTE   : __ sign_extend_byte (rax); break;
2088   case T_SHORT  : __ sign_extend_short(rax); break;
2089   case T_INT    : /* nothing to do */        break;
2090   case T_DOUBLE :
2091   case T_FLOAT  :
2092     // Result is in st0; we'll save as needed
2093     break;
2094   case T_ARRAY:                 // Really a handle
2095   case T_OBJECT:                // Really a handle
2096       break; // can't de-handlize until after safepoint check
2097   case T_VOID: break;
2098   case T_LONG: break;
2099   default       : ShouldNotReachHere();
2100   }
2101 
2102   // Switch thread to "native transition" state before reading the synchronization state.
2103   // This additional state is necessary because reading and testing the synchronization
2104   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2105   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2106   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2107   //     Thread A is resumed to finish this native method, but doesn't block here since it
2108     //     didn't see any synchronization in progress, and escapes.
2109   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2110 
2111   if (os::is_MP()) {
2112     if (UseMembar) {
2113       // Force this write out before the read below
2114       __ membar(Assembler::Membar_mask_bits(
2115            Assembler::LoadLoad | Assembler::LoadStore |
2116            Assembler::StoreLoad | Assembler::StoreStore));
2117     } else {
2118       // Write serialization page so VM thread can do a pseudo remote membar.
2119       // We use the current thread pointer to calculate a thread specific
2120       // offset to write to within the page. This minimizes bus traffic
2121       // due to cache line collision.
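           // (conceptually: each thread stores to serialize_page + f(thread);
           //  the VM thread can write-protect that page so the store traps,
           //  which acts as a remote barrier)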
2122       __ serialize_memory(thread, rcx);
2123     }
2124   }
2125 
2126   if (AlwaysRestoreFPU) {
2127     // Make sure the control word is correct.
2128     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2129   }
2130 
2131   Label after_transition;
2132 
2133   // check for safepoint operation in progress and/or pending suspend requests
2134   { Label Continue;
2135 
2136     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2137              SafepointSynchronize::_not_synchronized);
2138 
2139     Label L;
2140     __ jcc(Assembler::notEqual, L);
2141     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2142     __ jcc(Assembler::equal, Continue);
2143     __ bind(L);
2144 
2145     // Don't use call_VM as it will see a possible pending exception and forward it
2146     // and never return here preventing us from clearing _last_native_pc down below.
2147     // Also can't use call_VM_leaf, as it will check to see if rsi & rdi are
2148     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2149     // by hand.
2150     //
2151     save_native_result(masm, ret_type, stack_slots);
2152     __ push(thread);
2153     if (!is_critical_native) {
2154       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2155                                               JavaThread::check_special_condition_for_native_trans)));
2156     } else {
2157       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2158                                               JavaThread::check_special_condition_for_native_trans_and_transition)));
2159     }
2160     __ increment(rsp, wordSize);
2161     // Restore any method result value
2162     restore_native_result(masm, ret_type, stack_slots);
2163 
2164     if (is_critical_native) {
2165       // The call above performed the transition to thread_in_Java so
2166       // skip the transition logic below.
2167       __ jmpb(after_transition);
2168     }
2169 
2170     __ bind(Continue);
2171   }
2172 
2173   // change thread state
2174   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
2175   __ bind(after_transition);
2176 
2177   Label reguard;
2178   Label reguard_done;
2179   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2180   __ jcc(Assembler::equal, reguard);
2181 
2182   // slow path reguard  re-enters here
2183   __ bind(reguard_done);
2184 
2185   // Handle possible exception (will unlock if necessary)
2186 
2187   // native result if any is live
2188 
2189   // Unlock
2190   Label slow_path_unlock;
2191   Label unlock_done;
2192   if (method->is_synchronized()) {
2193 
2194     Label done;
2195 
2196     // Get locked oop from the handle we passed to jni
2197     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2198 
2199     if (UseBiasedLocking) {
2200       __ biased_locking_exit(obj_reg, rbx, done);
2201     }
2202 
2203     // Simple recursive lock?
2204 
2205     __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2206     __ jcc(Assembler::equal, done);
2207 
2208     // Must save rax if it is live now because cmpxchg must use it
2209     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2210       save_native_result(masm, ret_type, stack_slots);
2211     }
2212 
2213     //  get old displaced header
2214     __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2215 
2216     // get address of the stack lock
2217     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2218 
2219     // Atomic swap old header if oop still contains the stack lock
2220     if (os::is_MP()) {
2221       __ lock();
2222     }
2223 
2224     // src -> dest iff dest == rax, else rax <- dest
2225     // *obj_reg = rbx iff *obj_reg == rax, else rax = *(obj_reg)
2226     __ cmpxchgptr(rbx, Address(obj_reg, 0));
2227     __ jcc(Assembler::notEqual, slow_path_unlock);
2228 
2229     // slow path re-enters here
2230     __ bind(unlock_done);
2231     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2232       restore_native_result(masm, ret_type, stack_slots);
2233     }
2234 
2235     __ bind(done);
2236 
2237   }
2238 
2239   {
2240     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2241     // Tell dtrace about this method exit
2242     save_native_result(masm, ret_type, stack_slots);
2243     __ mov_metadata(rax, method());
2244     __ call_VM_leaf(
2245          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2246          thread, rax);
2247     restore_native_result(masm, ret_type, stack_slots);
2248   }
2249 
2250   // We can finally stop using that last_Java_frame we setup ages ago
2251 
2252   __ reset_last_Java_frame(thread, false, true);
2253 
2254   // Unpack oop result
2255   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2256       Label L;
2257       __ cmpptr(rax, (int32_t)NULL_WORD);
2258       __ jcc(Assembler::equal, L);
2259       __ movptr(rax, Address(rax, 0));
2260       __ bind(L);
2261       __ verify_oop(rax);
2262   }
2263 
2264   if (!is_critical_native) {
2265     // reset handle block
2266     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
2267     __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
2268 
2269     // Any exception pending?
2270     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2271     __ jcc(Assembler::notEqual, exception_pending);
2272   }
2273 
2274   // no exception, we're almost done
2275 
2276   // check that only result value is on FPU stack
2277   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2278 
2279   // Fixup floating point results so that the result looks like a return from a compiled method
2280   if (ret_type == T_FLOAT) {
2281     if (UseSSE >= 1) {
2282       // Pop st0 and store as float and reload into xmm register
2283       __ fstp_s(Address(rbp, -4));
2284       __ movflt(xmm0, Address(rbp, -4));
2285     }
2286   } else if (ret_type == T_DOUBLE) {
2287     if (UseSSE >= 2) {
2288       // Pop st0 and store as double and reload into xmm register
2289       __ fstp_d(Address(rbp, -8));
2290       __ movdbl(xmm0, Address(rbp, -8));
2291     }
2292   }
2293 
2294   // Return
2295 
2296   __ leave();
2297   __ ret(0);
2298 
2299   // Unexpected paths are out of line and go here
2300 
2301   // Slow path locking & unlocking
2302   if (method->is_synchronized()) {
2303 
2304     // BEGIN Slow path lock
2305 
2306     __ bind(slow_path_lock);
2307 
2308     // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2309     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2310     __ push(thread);
2311     __ push(lock_reg);
2312     __ push(obj_reg);
2313     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2314     __ addptr(rsp, 3*wordSize);
2315 
2316 #ifdef ASSERT
2317     { Label L;
2318     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2319     __ jcc(Assembler::equal, L);
2320     __ stop("no pending exception allowed on exit from monitorenter");
2321     __ bind(L);
2322     }
2323 #endif
2324     __ jmp(lock_done);
2325 
2326     // END Slow path lock
2327 
2328     // BEGIN Slow path unlock
2329     __ bind(slow_path_unlock);
2330 
2331     // Slow path unlock
2332 
2333     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2334       save_native_result(masm, ret_type, stack_slots);
2335     }
2336     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2337 
2338     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2339     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2340 
2341 
2342     // should be a peal
2343     // +wordSize because of the push above
2344     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2345     __ push(rax);
2346 
2347     __ push(obj_reg);
2348     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2349     __ addptr(rsp, 2*wordSize);
2350 #ifdef ASSERT
2351     {
2352       Label L;
2353       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2354       __ jcc(Assembler::equal, L);
2355       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2356       __ bind(L);
2357     }
2358 #endif /* ASSERT */
2359 
2360     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2361 
2362     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2363       restore_native_result(masm, ret_type, stack_slots);
2364     }
2365     __ jmp(unlock_done);
2366     // END Slow path unlock
2367 
2368   }
2369 
2370   // SLOW PATH Reguard the stack if needed
2371 
2372   __ bind(reguard);
2373   save_native_result(masm, ret_type, stack_slots);
2374   {
2375     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2376   }
2377   restore_native_result(masm, ret_type, stack_slots);
2378   __ jmp(reguard_done);
2379 
2380 
2381   // BEGIN EXCEPTION PROCESSING
2382 
2383   if (!is_critical_native) {
2384     // Forward the exception
2385     __ bind(exception_pending);
2386 
2387     // remove possible return value from FPU register stack
2388     __ empty_FPU_stack();
2389 
2390     // pop our frame
2391     __ leave();
2392     // and forward the exception
2393     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2394   }
2395 
2396   __ flush();
2397 
2398   nmethod *nm = nmethod::new_native_nmethod(method,
2399                                             compile_id,
2400                                             masm->code(),
2401                                             vep_offset,
2402                                             frame_complete,
2403                                             stack_slots / VMRegImpl::slots_per_word,
2404                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2405                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2406                                             oop_maps);
2407 
2408   if (is_critical_native) {
2409     nm->set_lazy_critical_native(true);
2410   }
2411 
2412   return nm;
2413 
2414 }
2415 
2416 #ifdef HAVE_DTRACE_H
2417 // ---------------------------------------------------------------------------
2418 // Generate a dtrace nmethod for a given signature.  The method takes arguments
2419 // in the Java compiled code convention, marshals them to the native
2420 // abi and then leaves nops at the position you would expect to call a native
2421 // function. When the probe is enabled the nops are replaced with a trap
2422 // instruction that dtrace inserts and the trap will cause a notification
2423 // to dtrace.
2424 //
2425 // The probes are only able to take primitive types and java/lang/String as
2426 // arguments.  No other java types are allowed. Strings are converted to utf8
2427 // strings so that from dtrace's point of view java strings are converted to C
2428 // strings. There is an arbitrary fixed limit on the total space that a method
2429 // can use for converting the strings (256 chars per string in the signature),
2430 // so any java string larger than this is truncated.
2431 
2432 nmethod *SharedRuntime::generate_dtrace_nmethod(
2433     MacroAssembler *masm, methodHandle method) {
2434 
2435   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2436   // be single threaded in this method.
2437   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2438 
2439   // Fill in the signature array, for the calling-convention call.
2440   int total_args_passed = method->size_of_parameters();
2441 
2442   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2443   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2444 
2445   // The signature we are going to use for the trap that dtrace will see:
2446   // java/lang/String is converted, we drop "this", and any other object
2447   // is converted to NULL.  (A one-slot java/lang/Long object reference
2448   // is converted to a two-slot long, which is why we double the allocation.)
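       // For example, a (Ljava/lang/String;J)V signature yields
       //   out_sig_bt = { T_ADDRESS, T_LONG, T_VOID }
       // in the conversion loop below.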
2449   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2450   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2451 
2452   int i=0;
2453   int total_strings = 0;
2454   int first_arg_to_pass = 0;
2455   int total_c_args = 0;
2456 
2457   if( !method->is_static() ) {  // Pass in receiver first
2458     in_sig_bt[i++] = T_OBJECT;
2459     first_arg_to_pass = 1;
2460   }
2461 
2462   // We need to convert the java args to where a native (non-jni) function
2463   // would expect them. To figure out where they go we convert the java
2464   // signature to a C signature.
2465 
2466   SignatureStream ss(method->signature());
2467   for ( ; !ss.at_return_type(); ss.next()) {
2468     BasicType bt = ss.type();
2469     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
2470     out_sig_bt[total_c_args++] = bt;
2471     if( bt == T_OBJECT) {
2472       Symbol* s = ss.as_symbol_or_null();   // symbol is created
2473       if (s == vmSymbols::java_lang_String()) {
2474         total_strings++;
2475         out_sig_bt[total_c_args-1] = T_ADDRESS;
2476       } else if (s == vmSymbols::java_lang_Boolean() ||
2477                  s == vmSymbols::java_lang_Character() ||
2478                  s == vmSymbols::java_lang_Byte() ||
2479                  s == vmSymbols::java_lang_Short() ||
2480                  s == vmSymbols::java_lang_Integer() ||
2481                  s == vmSymbols::java_lang_Float()) {
2482         out_sig_bt[total_c_args-1] = T_INT;
2483       } else if (s == vmSymbols::java_lang_Long() ||
2484                  s == vmSymbols::java_lang_Double()) {
2485         out_sig_bt[total_c_args-1] = T_LONG;
2486         out_sig_bt[total_c_args++] = T_VOID;
2487       }
2488     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2489       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2490       out_sig_bt[total_c_args++] = T_VOID;
2491     }
2492   }
2493 
2494   assert(i==total_args_passed, "validly parsed signature");
2495 
2496   // Now get the compiled-Java layout as input arguments
2497   int comp_args_on_stack;
2498   comp_args_on_stack = SharedRuntime::java_calling_convention(
2499       in_sig_bt, in_regs, total_args_passed, false);
2500 
2501   // Now figure out where the args must be stored and how much stack space
2502   // they require (neglecting out_preserve_stack_slots).
2503 
2504   int out_arg_slots;
2505   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2506 
2507   // Calculate the total number of stack slots we will need.
2508 
2509   // First count the abi requirement plus all of the outgoing args
2510   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2511 
2512   // Now space for the string(s) we must convert
2513 
2514   int* string_locs   = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2515   for (i = 0; i < total_strings ; i++) {
2516     string_locs[i] = stack_slots;
2517     stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2518   }
2519 
2520   // + 2 for return address (which we own) and saved rbp,
2521 
2522   stack_slots += 2;
2523 
2524   // OK, the space we have allocated will look like:
2525   //
2526   //
2527   // FP-> |                     |
2528   //      |---------------------|
2529   //      | string[n]           |
2530   //      |---------------------| <- string_locs[n]
2531   //      | string[n-1]         |
2532   //      |---------------------| <- string_locs[n-1]
2533   //      | ...                 |
2534   //      | ...                 |
2535   //      |---------------------| <- string_locs[1]
2536   //      | string[0]           |
2537   //      |---------------------| <- string_locs[0]
2538   //      | outbound memory     |
2539   //      | based arguments     |
2540   //      |                     |
2541   //      |---------------------|
2542   //      |                     |
2543   // SP-> | out_preserved_slots |
2544   //
2545   //
2546 
2547   // Now compute the actual number of stack words we need, rounding to make
2548   // the stack properly aligned.
2549   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2550 
2551   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2552 
2553   intptr_t start = (intptr_t)__ pc();
2554 
2555   // First thing make an ic check to see if we should even be here
2556 
2557   // We are free to use all registers as temps without saving them and
2558   // restoring them except rbp. rbp is the only callee save register
2559   // as far as the interpreter and the compiler(s) are concerned.
2560 
2561   const Register ic_reg = rax;
2562   const Register receiver = rcx;
2563   Label hit;
2564   Label exception_pending;
2565 
2566 
2567   __ verify_oop(receiver);
2568   __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2569   __ jcc(Assembler::equal, hit);
2570 
2571   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2572 
2573   // The verified entry must be aligned for code patching,
2574   // and the first 5 bytes must be in the same cache line;
2575   // if we align at 8 then we can be sure the 5 bytes are in the same line.
2576   __ align(8);
2577 
2578   __ bind(hit);
2579 
2580   int vep_offset = ((intptr_t)__ pc()) - start;
2581 
2582 
2583   // The instruction at the verified entry point must be 5 bytes or longer
2584   // because it can be patched on the fly by make_non_entrant. The stack bang
2585   // instruction fits that requirement.
2586 
2587   // Generate stack overflow check
2588 
2589 
2590   if (UseStackBanging) {
2591     if (stack_size <= StackShadowPages*os::vm_page_size()) {
2592       __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2593     } else {
2594       __ movl(rax, stack_size);
2595       __ bang_stack_size(rax, rbx);
2596     }
2597   } else {
2598     // need a 5 byte instruction to allow MT safe patching to non-entrant
2599     __ fat_nop();
2600   }
2601 
2602   assert(((int)__ pc() - start - vep_offset) >= 5,
2603          "valid size for make_non_entrant");
2604 
2605   // Generate a new frame for the wrapper.
2606   __ enter();
2607 
2608   // -2 because return address is already present and so is saved rbp
2609   if (stack_size - 2*wordSize != 0) {
2610     __ subl(rsp, stack_size - 2*wordSize);
2611   }
2612 
2613   // Frame is now completed as far as size and linkage.
2614 
2615   int frame_complete = ((intptr_t)__ pc()) - start;
2616 
2617   // First thing we do is store all the args as if we are doing the call.
2618   // Since the C calling convention is stack based that ensures that
2619   // all the Java register args are stored before we need to convert any
2620   // string we might have.
2621 
2622   int sid = 0;
2623   int c_arg, j_arg;
2624   int string_reg = 0;
2625 
2626   for (j_arg = first_arg_to_pass, c_arg = 0 ;
2627        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2628 
2629     VMRegPair src = in_regs[j_arg];
2630     VMRegPair dst = out_regs[c_arg];
2631     assert(dst.first()->is_stack() || in_sig_bt[j_arg] == T_VOID,
2632            "stack based abi assumed");
2633 
2634     switch (in_sig_bt[j_arg]) {
2635 
2636       case T_ARRAY:
2637       case T_OBJECT:
2638         if (out_sig_bt[c_arg] == T_ADDRESS) {
2639           // Any register based arg for a java string after the first
2640           // will be destroyed by the call to get_utf, so we store
2641           // the original value in the location the utf string address
2642           // will eventually be stored.
2643           if (src.first()->is_reg()) {
2644             if (string_reg++ != 0) {
2645               simple_move32(masm, src, dst);
2646             }
2647           }
2648         } else if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2649           // need to unbox a one-word value
2650           Register in_reg = rax;
2651           if ( src.first()->is_reg() ) {
2652             in_reg = src.first()->as_Register();
2653           } else {
2654             simple_move32(masm, src, in_reg->as_VMReg());
2655           }
2656           Label skipUnbox;
2657           __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
2658           if ( out_sig_bt[c_arg] == T_LONG ) {
2659             __ movl(Address(rsp, reg2offset_out(dst.second())), NULL_WORD);
2660           }
2661           __ testl(in_reg, in_reg);
2662           __ jcc(Assembler::zero, skipUnbox);
2663           assert(dst.first()->is_stack() &&
2664                  (!dst.second()->is_valid() || dst.second()->is_stack()),
2665                  "value(s) must go into stack slots");
2666 
2667           BasicType bt = out_sig_bt[c_arg];
2668           int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2669           if ( bt == T_LONG ) {
2670             __ movl(rbx, Address(in_reg,
2671                                  box_offset + VMRegImpl::stack_slot_size));
2672             __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
2673           }
2674           __ movl(in_reg,  Address(in_reg, box_offset));
2675           __ movl(Address(rsp, reg2offset_out(dst.first())), in_reg);
2676           __ bind(skipUnbox);
2677         } else {
2678           // Convert the arg to NULL
2679           __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
2680         }
2681         if (out_sig_bt[c_arg] == T_LONG) {
2682           assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2683           ++c_arg; // Move over the T_VOID to keep the loop indices in sync
2684         }
2685         break;
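        // In C-like pseudocode, the unboxing emitted above is roughly
        // (a sketch; box_offset comes from java_lang_boxing_object):
        //
        //   *(jlong*)(rsp + dst_off) = 0;            // NULL box converts to 0
        //   if (box != NULL)
        //     *(jlong*)(rsp + dst_off) = *(jlong*)((char*)box + box_offset);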
2686 
2687       case T_VOID:
2688         break;
2689 
2690       case T_FLOAT:
2691         float_move(masm, src, dst);
2692         break;
2693 
2694       case T_DOUBLE:
2695         assert( j_arg + 1 < total_args_passed &&
2696                 in_sig_bt[j_arg + 1] == T_VOID, "bad arg list");
2697         double_move(masm, src, dst);
2698         break;
2699 
2700       case T_LONG :
2701         long_move(masm, src, dst);
2702         break;
2703 
2704       case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); break;
2705 
2706       default:
2707         simple_move32(masm, src, dst);
2708     }
2709   }
2710 
2711   // Now we must convert any string we have to UTF-8
2712   //
2713 
2714   for (sid = 0, j_arg = first_arg_to_pass, c_arg = 0 ;
2715        sid < total_strings ; j_arg++, c_arg++ ) {
2716 
2717     if (out_sig_bt[c_arg] == T_ADDRESS) {
2718 
2719       Address utf8_addr = Address(
2720           rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
2721       __ leal(rax, utf8_addr);
2722 
2723       // The first string we find might still be in the original java arg
2724       // register
2725       VMReg orig_loc = in_regs[j_arg].first();
2726       Register string_oop;
2727 
2728       // This is where the argument will eventually reside
2729       Address dest = Address(rsp, reg2offset_out(out_regs[c_arg].first()));
2730 
2731       if (sid == 1 && orig_loc->is_reg()) {
2732         string_oop = orig_loc->as_Register();
2733         assert(string_oop != rax, "smashed arg");
2734       } else {
2735 
2736         if (orig_loc->is_reg()) {
2737           // Get the copy of the jls (java.lang.String) object
2738           __ movl(rcx, dest);
2739         } else {
2740           // arg is still in the original location
2741           __ movl(rcx, Address(rbp, reg2offset_in(orig_loc)));
2742         }
2743         string_oop = rcx;
2744 
2745       }
2746       Label nullString;
2747       __ movl(dest, NULL_WORD);
2748       __ testl(string_oop, string_oop);
2749       __ jcc(Assembler::zero, nullString);
2750 
2751       // Now we can store the address of the utf string as the argument
2752       __ movl(dest, rax);
2753 
2754       // And do the conversion
2755       __ call_VM_leaf(CAST_FROM_FN_PTR(
2756              address, SharedRuntime::get_utf), string_oop, rax);
2757       __ bind(nullString);
2758     }
2759 
2760     if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
2761       assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2762       ++c_arg; // Move over the T_VOID to keep the loop indices in sync
2763     }
2764   }
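  // Per string argument, the loop above effectively emits (a sketch):
  //
  //   char* utf8 = (char*)rsp + string_loc;     // scratch slot on our frame
  //   *arg_slot = NULL;                          // assume null string
  //   if (string_oop != NULL) {
  //     *arg_slot = utf8;
  //     SharedRuntime::get_utf(string_oop, utf8);  // leaf call, cannot block
  //   }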
2765 
2766 
2767   // OK, now we are done. We need to place the nop that dtrace wants in
2768   // order to patch in the trap
2769 
2770   int patch_offset = ((intptr_t)__ pc()) - start;
2771 
2772   __ nop();
2773 
2774 
2775   // Return
2776 
2777   __ leave();
2778   __ ret(0);
2779 
2780   __ flush();
2781 
2782   nmethod *nm = nmethod::new_dtrace_nmethod(
2783       method, masm->code(), vep_offset, patch_offset, frame_complete,
2784       stack_slots / VMRegImpl::slots_per_word);
2785   return nm;
2786 
2787 }
2788 
2789 #endif // HAVE_DTRACE_H
2790 
2791 // This function returns the adjustment (in number of words) to apply to a
2792 // c2i adapter activation for use during deoptimization
2793 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2794   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2795 }
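// For example, a callee with 3 parameters and 7 locals yields
// (7 - 3) * Interpreter::stackElementWords, i.e. 4 extra words on ia32
// where stackElementWords is 1.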
2796 
2797 
2798 uint SharedRuntime::out_preserve_stack_slots() {
2799   return 0;
2800 }
2801 
2802 //------------------------------generate_deopt_blob----------------------------
2803 void SharedRuntime::generate_deopt_blob() {
2804   // allocate space for the code
2805   ResourceMark rm;
2806   // setup code generation tools
2807   CodeBuffer   buffer("deopt_blob", 1024, 1024);
2808   MacroAssembler* masm = new MacroAssembler(&buffer);
2809   int frame_size_in_words;
2810   OopMap* map = NULL;
2811   // Account for the extra args we place on the stack
2812   // by the time we call fetch_unroll_info
2813   const int additional_words = 2; // deopt kind, thread
2814 
2815   OopMapSet *oop_maps = new OopMapSet();
2816 
2817   // -------------
2818   // This code enters when returning to a de-optimized nmethod.  A return
2819   // address has been pushed on the stack, and return values are in
2820   // registers.
2821   // If we are doing a normal deopt then we were called from the patched
2822   // nmethod from the point we returned to the nmethod. So the return
2823   // address on the stack is wrong by NativeCall::instruction_size
2824   // We will adjust the value so that it looks like we have the original return
2825   // address on the stack (like when we eagerly deoptimized).
2826   // In the case of an exception pending when we deoptimized, we enter
2827   // with a return address on the stack that points after the call we patched
2828   // into the exception handler. We have the following register state:
2829   //    rax,: exception
2830   //    rbx,: exception handler
2831   //    rdx: throwing pc
2832   // So in this case we simply jam rdx into the useless return address and
2833   // the stack looks just like we want.
2834   //
2835   // At this point we need to de-opt.  We save the argument return
2836   // registers.  We call the first C routine, fetch_unroll_info().  This
2837   // routine captures the return values and returns a structure which
2838   // describes the current frame size and the sizes of all replacement frames.
2839   // The current frame is compiled code and may contain many inlined
2840   // functions, each with their own JVM state.  We pop the current frame, then
2841   // push all the new frames.  Then we call the C routine unpack_frames() to
2842   // populate these frames.  Finally unpack_frames() returns us the new target
2843   // address.  Notice that callee-save registers are BLOWN here; they have
2844   // already been captured in the vframeArray at the time the return PC was
2845   // patched.
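  // In outline, the control flow implemented below is roughly (a sketch):
  //
  //   save_live_registers();                     // captures return values too
  //   info = Deoptimization::fetch_unroll_info(thread);
  //   pop the deoptimized frame;
  //   for (i = 0; i < info->number_of_frames(); i++)
  //     push skeletal interpreter frame i;       // the 'loop' label below
  //   Deoptimization::unpack_frames(thread, exec_mode);  // fill in skeletons
  //   return into the topmost new frame via the pushed pc;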
2846   address start = __ pc();
2847   Label cont;
2848 
2849   // Prolog for the non-exception case!
2850 
2851   // Save everything in sight.
2852 
2853   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2854   // Normal deoptimization
2855   __ push(Deoptimization::Unpack_deopt);
2856   __ jmp(cont);
2857 
2858   int reexecute_offset = __ pc() - start;
2859 
2860   // Reexecute case
2861   // the return address is the pc that describes what bci to re-execute at
2862 
2863   // No need to update map as each call to save_live_registers will produce identical oopmap
2864   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2865 
2866   __ push(Deoptimization::Unpack_reexecute);
2867   __ jmp(cont);
2868 
2869   int exception_offset = __ pc() - start;
2870 
2871   // Prolog for exception case
2872 
2873   // all registers are dead at this entry point, except for rax, and
2874   // rdx which contain the exception oop and exception pc
2875   // respectively.  Set them in TLS and fall thru to the
2876   // unpack_with_exception_in_tls entry point.
2877 
2878   __ get_thread(rdi);
2879   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2880   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2881 
2882   int exception_in_tls_offset = __ pc() - start;
2883 
2884   // new implementation because exception oop is now passed in JavaThread
2885 
2886   // Prolog for exception case
2887   // All registers must be preserved because they might be used by LinearScan
2888   // Exception oop and throwing PC are passed in JavaThread
2889   // tos: stack at point of call to method that threw the exception (i.e. only
2890   // args are on the stack, no return address)
2891 
2892   // make room on stack for the return address
2893   // It will be patched later with the throwing pc. The correct value is not
2894   // available now because loading it from memory would destroy registers.
2895   __ push(0);
2896 
2897   // Save everything in sight.
2898 
2899   // No need to update map as each call to save_live_registers will produce identical oopmap
2900   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2901 
2902   // Now it is safe to overwrite any register
2903 
2904   // store the correct deoptimization type
2905   __ push(Deoptimization::Unpack_exception);
2906 
2907   // load throwing pc from JavaThread and patch it as the return address
2908   // of the current frame. Then clear the field in JavaThread
2909   __ get_thread(rdi);
2910   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2911   __ movptr(Address(rbp, wordSize), rdx);
2912   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2913 
2914 #ifdef ASSERT
2915   // verify that there is really an exception oop in JavaThread
2916   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2917   __ verify_oop(rax);
2918 
2919   // verify that there is no pending exception
2920   Label no_pending_exception;
2921   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2922   __ testptr(rax, rax);
2923   __ jcc(Assembler::zero, no_pending_exception);
2924   __ stop("must not have pending exception here");
2925   __ bind(no_pending_exception);
2926 #endif
2927 
2928   __ bind(cont);
2929 
2930   // Compiled code leaves the floating point stack dirty, empty it.
2931   __ empty_FPU_stack();
2932 
2933 
2934   // Call C code.  Need thread and this frame, but NOT official VM entry
2935   // crud.  We cannot block on this call, no GC can happen.
2936   __ get_thread(rcx);
2937   __ push(rcx);
2938   // fetch_unroll_info needs to call last_java_frame()
2939   __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2940 
2941   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2942 
2943   // Need to have an oopmap that tells fetch_unroll_info where to
2944   // find any register it might need.
2945 
2946   oop_maps->add_gc_map( __ pc()-start, map);
2947 
2948   // Discard arg to fetch_unroll_info
2949   __ pop(rcx);
2950 
2951   __ get_thread(rcx);
2952   __ reset_last_Java_frame(rcx, false, false);
2953 
2954   // Load UnrollBlock into EDI
2955   __ mov(rdi, rax);
2956 
2957   // Move the unpack kind to a safe place in the UnrollBlock because
2958   // we are very short of registers
2959 
2960   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2961   // retrieve the deopt kind from where we left it.
2962   __ pop(rax);
2963   __ movl(unpack_kind, rax);                      // save the unpack_kind value
2964 
2965   Label noException;
2966   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2967   __ jcc(Assembler::notEqual, noException);
2968   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2969   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2970   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2971   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2972 
2973   __ verify_oop(rax);
2974 
2975   // Overwrite the result registers with the exception results.
2976   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2977   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2978 
2979   __ bind(noException);
2980 
2981   // Stack is back to only having register save data on the stack.
2982   // Now restore the result registers. Everything else is either dead or captured
2983   // in the vframeArray.
2984 
2985   RegisterSaver::restore_result_registers(masm);
2986 
2987   // A non-standard control word may be leaked out through a safepoint blob,
2988   // and we can deopt at a poll point with the non-standard control word. So we
2989   // make sure the control word is correct after restore_result_registers.
2990   __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2991 
2992   // All of the register save area has been popped off the stack. Only the
2993   // return address remains.
2994 
2995   // Pop all the frames we must move/replace.
2996   //
2997   // Frame picture (youngest to oldest)
2998   // 1: self-frame (no frame link)
2999   // 2: deopting frame  (no frame link)
3000   // 3: caller of deopting frame (could be compiled/interpreted).
3001   //
3002   // Note: by leaving the return address of self-frame on the stack
3003   // and using the size of frame 2 to adjust the stack
3004   // when we are done the return to frame 3 will still be on the stack.
3005 
3006   // Pop deoptimized frame
3007   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
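  // In effect (a sketch): rsp += unroll->size_of_deoptimized_frame(), which
  // drops everything the compiled frame owned while leaving frame 3's return
  // address on top of the stack.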
3008 
3009   // sp should be pointing at the return address to the caller (3)
3010 
3011   // Pick up the initial fp we should save
3012   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3013   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3014 
3015   // Stack bang to make sure there's enough room for these interpreter frames.
3016   if (UseStackBanging) {
3017     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3018     __ bang_stack_size(rbx, rcx);
3019   }
3020 
3021   // Load array of frame pcs into ECX
3022   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3023 
3024   __ pop(rsi); // trash the old pc
3025 
3026   // Load array of frame sizes into ESI
3027   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3028 
3029   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
3030 
3031   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3032   __ movl(counter, rbx);
3033 
3034   // Now adjust the caller's stack to make up for the extra locals,
3035   // but record the original sp so that we can save it in the skeletal
3036   // interpreter frame; then the stack walking of interpreter_sender will
3037   // get the unextended sp value and not the "real" sp value.
3038 
3039   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
3040   __ movptr(sp_temp, rsp);
3041   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
3042   __ subptr(rsp, rbx);
3043 
3044   // Push interpreter frames in a loop
3045   Label loop;
3046   __ bind(loop);
3047   __ movptr(rbx, Address(rsi, 0));      // Load frame size
3048 #ifdef CC_INTERP
3049   __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
3050 #ifdef ASSERT
3051   __ push(0xDEADDEAD);                  // Make a recognizable pattern
3052   __ push(0xDEADDEAD);
3053 #else /* ASSERT */
3054   __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
3055 #endif /* ASSERT */
3056 #else /* CC_INTERP */
3057   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
3058 #endif /* CC_INTERP */
3059   __ pushptr(Address(rcx, 0));          // save return address
3060   __ enter();                           // save old & set new rbp,
3061   __ subptr(rsp, rbx);                  // Prolog!
3062   __ movptr(rbx, sp_temp);              // sender's sp
3063 #ifdef CC_INTERP
3064   __ movptr(Address(rbp,
3065                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3066           rbx); // Make it walkable
3067 #else /* CC_INTERP */
3068   // This value is corrected by layout_activation_impl
3069   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
3070   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
3071 #endif /* CC_INTERP */
3072   __ movptr(sp_temp, rsp);              // pass to next frame
3073   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
3074   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
3075   __ decrementl(counter);             // decrement counter
3076   __ jcc(Assembler::notZero, loop);
3077   __ pushptr(Address(rcx, 0));          // save final return address
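  // The loop above, in C-like pseudocode (a sketch; sizes[] and pcs[] are
  // the UnrollBlock arrays loaded into rsi and rcx):
  //
  //   for (i = 0; i < number_of_frames; i++) {
  //     push(pcs[i]);                  // return address for this frame
  //     push(rbp); rbp = rsp;          // enter()
  //     rsp -= sizes[i] - 2*wordSize;  // body of the skeletal frame
  //     fp[sender_sp_offset] = sender_sp;  // keep the frame walkable
  //     sender_sp = rsp;
  //   }
  //   push(pcs[number_of_frames]);     // final return address (self-frame)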
3078 
3079   // Re-push self-frame
3080   __ enter();                           // save old & set new rbp,
3081 
3082   // Return address and rbp, are in place
3083   // We'll push additional args later. Just allocate a full sized
3084   // register save area
3085   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
3086 
3087   // Restore frame locals after moving the frame
3088   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
3089   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
3090   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
3091   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
3092   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
3093 
3094   // Set up the args to unpack_frame
3095 
3096   __ pushl(unpack_kind);                     // push the unpack_kind value
3097   __ get_thread(rcx);
3098   __ push(rcx);
3099 
3100   // set last_Java_sp, last_Java_fp
3101   __ set_last_Java_frame(rcx, noreg, rbp, NULL);
3102 
3103   // Call C code.  Need thread but NOT official VM entry
3104   // crud.  We cannot block on this call, no GC can happen.  Call should
3105   // restore return values to their stack-slots with the new SP.
3106   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3107   // Set an oopmap for the call site
3108   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
3109 
3110   // rax, contains the return result type
3111   __ push(rax);
3112 
3113   __ get_thread(rcx);
3114   __ reset_last_Java_frame(rcx, false, false);
3115 
3116   // Collect return values
3117   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
3118   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
3119 
3120   // Clear floating point stack before returning to interpreter
3121   __ empty_FPU_stack();
3122 
3123   // Check if we should push the float or double return value.
3124   Label results_done, yes_double_value;
3125   __ cmpl(Address(rsp, 0), T_DOUBLE);
3126   __ jcc (Assembler::zero, yes_double_value);
3127   __ cmpl(Address(rsp, 0), T_FLOAT);
3128   __ jcc (Assembler::notZero, results_done);
3129 
3130   // return float value as expected by interpreter
3131   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
3132   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
3133   __ jmp(results_done);
3134 
3135   // return double value as expected by interpreter
3136   __ bind(yes_double_value);
3137   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
3138   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
3139 
3140   __ bind(results_done);
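  // i.e. a sketch of the dispatch above:
  //
  //   switch (result_type) {           // pushed by us from unpack_frames' rax
  //     case T_FLOAT:  reload xmm0 (or ST0) as a float from the save area;
  //     case T_DOUBLE: reload xmm0 (or ST0) as a double;
  //     default:       rax/rdx were already restored above;
  //   }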
3141 
3142   // Pop self-frame.
3143   __ leave();                              // Epilog!
3144 
3145   // Jump to interpreter
3146   __ ret(0);
3147 
3148   // -------------
3149   // make sure all code is generated
3150   masm->flush();
3151 
3152   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3153   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3154 }
3155 
3156 
3157 #ifdef COMPILER2
3158 //------------------------------generate_uncommon_trap_blob--------------------
3159 void SharedRuntime::generate_uncommon_trap_blob() {
3160   // allocate space for the code
3161   ResourceMark rm;
3162   // setup code generation tools
3163   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
3164   MacroAssembler* masm = new MacroAssembler(&buffer);
3165 
3166   enum frame_layout {
3167     arg0_off,      // thread                     sp + 0 // Arg location for
3168     arg1_off,      // unloaded_class_index       sp + 1 // calling C
3169     // The frame sender code expects that rbp will be in the "natural" place and
3170     // will override any oopMap setting for it. We must therefore force the layout
3171     // so that it agrees with the frame sender code.
3172     rbp_off,       // callee saved register      sp + 2
3173     return_off,    // slot for return address    sp + 3
3174     framesize
3175   };
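  // So the hand-built self-frame is just four words; a sketch of the layout
  // the enum describes (higher addresses first):
  //
  //   [return address]          <- return_off
  //   [saved rbp]               <- rbp_off
  //   [unloaded_class_index]    <- arg1_off
  //   [thread]                  <- arg0_off == rsp after the subptr below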
3176 
3177   address start = __ pc();
3178 
3179   if (UseRTMLocking) {
3180     // Abort RTM transaction before possible nmethod deoptimization.
3181     __ xabort(0);
3182   }
3183 
3184   // Push self-frame.
3185   __ subptr(rsp, return_off*wordSize);     // Prolog!
3186 
3187   // rbp, is an implicitly saved callee-saved register (i.e. the calling
3188   // convention will save/restore it in prolog/epilog). Other than that
3189   // there are no callee-save registers now that adapter frames are gone.
3190   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
3191 
3192   // Clear the floating point exception stack
3193   __ empty_FPU_stack();
3194 
3195   // set last_Java_sp
3196   __ get_thread(rdx);
3197   __ set_last_Java_frame(rdx, noreg, noreg, NULL);
3198 
3199   // Call C code.  Need thread but NOT official VM entry
3200   // crud.  We cannot block on this call, no GC can happen.  Call should
3201   // capture callee-saved registers as well as return values.
3202   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
3203   // argument already in ECX
3204   __ movl(Address(rsp, arg1_off*wordSize),rcx);
3205   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3206 
3207   // Set an oopmap for the call site
3208   OopMapSet *oop_maps = new OopMapSet();
3209   OopMap* map =  new OopMap( framesize, 0 );
3210   // No oopMap for rbp, it is known implicitly
3211 
3212   oop_maps->add_gc_map( __ pc()-start, map);
3213 
3214   __ get_thread(rcx);
3215 
3216   __ reset_last_Java_frame(rcx, false, false);
3217 
3218   // Load UnrollBlock into EDI
3219   __ movptr(rdi, rax);
3220 
3221   // Pop all the frames we must move/replace.
3222   //
3223   // Frame picture (youngest to oldest)
3224   // 1: self-frame (no frame link)
3225   // 2: deopting frame  (no frame link)
3226   // 3: caller of deopting frame (could be compiled/interpreted).
3227 
3228   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
3229   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
3230 
3231   // Pop deoptimized frame
3232   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3233   __ addptr(rsp, rcx);
3234 
3235   // sp should be pointing at the return address to the caller (3)
3236 
3237   // Pick up the initial fp we should save
3238   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3239   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3240 
3241   // Stack bang to make sure there's enough room for these interpreter frames.
3242   if (UseStackBanging) {
3243     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3244     __ bang_stack_size(rbx, rcx);
3245   }
3246 
3247 
3248   // Load array of frame pcs into ECX
3249   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3250 
3251   __ pop(rsi); // trash the pc
3252 
3253   // Load array of frame sizes into ESI
3254   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3255 
3256   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
3257 
3258   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3259   __ movl(counter, rbx);
3260 
3261   // Now adjust the caller's stack to make up for the extra locals,
3262   // but record the original sp so that we can save it in the skeletal
3263   // interpreter frame; then the stack walking of interpreter_sender will
3264   // get the unextended sp value and not the "real" sp value.
3265 
3266   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
3267   __ movptr(sp_temp, rsp);
3268   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
3269   __ subptr(rsp, rbx);
3270 
3271   // Push interpreter frames in a loop
3272   Label loop;
3273   __ bind(loop);
3274   __ movptr(rbx, Address(rsi, 0));      // Load frame size
3275 #ifdef CC_INTERP
3276   __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
3277 #ifdef ASSERT
3278   __ push(0xDEADDEAD);                  // Make a recognizable pattern
3279   __ push(0xDEADDEAD);                  // (parm to RecursiveInterpreter...)
3280 #else /* ASSERT */
3281   __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
3282 #endif /* ASSERT */
3283 #else /* CC_INTERP */
3284   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
3285 #endif /* CC_INTERP */
3286   __ pushptr(Address(rcx, 0));          // save return address
3287   __ enter();                           // save old & set new rbp,
3288   __ subptr(rsp, rbx);                  // Prolog!
3289   __ movptr(rbx, sp_temp);              // sender's sp
3290 #ifdef CC_INTERP
3291   __ movptr(Address(rbp,
3292                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3293           rbx); // Make it walkable
3294 #else /* CC_INTERP */
3295   // This value is corrected by layout_activation_impl
3296   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
3297   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
3298 #endif /* CC_INTERP */
3299   __ movptr(sp_temp, rsp);              // pass to next frame
3300   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
3301   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
3302   __ decrementl(counter);             // decrement counter
3303   __ jcc(Assembler::notZero, loop);
3304   __ pushptr(Address(rcx, 0));            // save final return address
3305 
3306   // Re-push self-frame
3307   __ enter();                           // save old & set new rbp,
3308   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
3309 
3310 
3311   // set last_Java_sp, last_Java_fp
3312   __ get_thread(rdi);
3313   __ set_last_Java_frame(rdi, noreg, rbp, NULL);
3314 
3315   // Call C code.  Need thread but NOT official VM entry
3316   // crud.  We cannot block on this call, no GC can happen.  Call should
3317   // restore return values to their stack-slots with the new SP.
3318   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
3319   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
3320   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3321   // Set an oopmap for the call site
3322   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
3323 
3324   __ get_thread(rdi);
3325   __ reset_last_Java_frame(rdi, true, false);
3326 
3327   // Pop self-frame.
3328   __ leave();     // Epilog!
3329 
3330   // Jump to interpreter
3331   __ ret(0);
3332 
3333   // -------------
3334   // make sure all code is generated
3335   masm->flush();
3336 
3337   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
3338 }
3339 #endif // COMPILER2
3340 
3341 //------------------------------generate_handler_blob------
3342 //
3343 // Generate a special Compile2Runtime blob that saves all registers,
3344 // setup oopmap, and calls safepoint code to stop the compiled code for
3345 // a safepoint.
3346 //
3347 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3348 
3349   // Account for thread arg in our frame
3350   const int additional_words = 1;
3351   int frame_size_in_words;
3352 
3353   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3354 
3355   ResourceMark rm;
3356   OopMapSet *oop_maps = new OopMapSet();
3357   OopMap* map;
3358 
3359   // allocate space for the code
3360   // setup code generation tools
3361   CodeBuffer   buffer("handler_blob", 1024, 512);
3362   MacroAssembler* masm = new MacroAssembler(&buffer);
3363 
3364   const Register java_thread = rdi; // callee-saved for VC++
3365   address start   = __ pc();
3366   address call_pc = NULL;
3367   bool cause_return = (poll_type == POLL_AT_RETURN);
3368   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3369 
3370   if (UseRTMLocking) {
3371     // Abort RTM transaction before calling runtime
3372     // because critical section will be large and will be
3373     // aborted anyway. Also nmethod could be deoptimized.
3374     __ xabort(0);
3375   }
3376 
3377   // If cause_return is true we are at a poll_return and there is
3378   // the return address on the stack to the caller of the nmethod
3379   // that is at the safepoint. We can leave this return on the stack and
3380   // effectively complete the return and safepoint in the caller.
3381   // Otherwise we push space for a return address that the safepoint
3382   // handler will install later to make the stack walking sensible.
3383   if (!cause_return)
3384     __ push(rbx);  // Make room for return address (or push it again)
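  // A sketch of the two resulting stack shapes:
  //
  //   poll_return:         [ret addr to caller][caller frame...]  (reused)
  //   poll elsewhere:      [dummy slot][caller frame...]
  //
  // The dummy slot is overwritten with JavaThread::saved_exception_pc below
  // so the frame walker sees a plausible return address.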
3385 
3386   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
3387 
3388   // The following is basically a call_VM. However, we need the precise
3389   // address of the call in order to generate an oopmap. Hence, we do all the
3390   // work ourselves.
3391 
3392   // Push thread argument and setup last_Java_sp
3393   __ get_thread(java_thread);
3394   __ push(java_thread);
3395   __ set_last_Java_frame(java_thread, noreg, noreg, NULL);
3396 
3397   // if this was not a poll_return then we need to correct the return address now.
3398   if (!cause_return) {
3399     __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
3400     __ movptr(Address(rbp, wordSize), rax);
3401   }
3402 
3403   // do the call
3404   __ call(RuntimeAddress(call_ptr));
3405 
3406   // Set an oopmap for the call site.  This oopmap will map all
3407   // oop-registers and debug-info registers as callee-saved.  This
3408   // will allow deoptimization at this safepoint to find all possible
3409   // debug-info recordings, as well as let GC find all oops.
3410 
3411   oop_maps->add_gc_map( __ pc() - start, map);
3412 
3413   // Discard arg
3414   __ pop(rcx);
3415 
3416   Label noException;
3417 
3418   // Clear last_Java_sp again
3419   __ get_thread(java_thread);
3420   __ reset_last_Java_frame(java_thread, false, false);
3421 
3422   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3423   __ jcc(Assembler::equal, noException);
3424 
3425   // Exception pending
3426   RegisterSaver::restore_live_registers(masm, save_vectors);
3427 
3428   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3429 
3430   __ bind(noException);
3431 
3432   // Normal exit, register restoring and exit
3433   RegisterSaver::restore_live_registers(masm, save_vectors);
3434 
3435   __ ret(0);
3436 
3437   // make sure all code is generated
3438   masm->flush();
3439 
3440   // Fill-out other meta info
3441   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3442 }
3443 
3444 //
3445 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3446 //
3447 // Generate a stub that calls into vm to find out the proper destination
3448 // of a java call. All the argument registers are live at this point
3449 // but since this is generic code we don't know what they are and the caller
3450 // must do any gc of the args.
3451 //
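// In outline (a sketch): save all registers; call 'destination' in the VM to
// resolve the call site; stash the returned Method* (rbx) and resolved entry
// address (rax) into the register save area so restore_live_registers()
// reloads them; then jmp rax to the resolved entry with every argument
// register intact.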
3452 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3453   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3454 
3455   // allocate space for the code
3456   ResourceMark rm;
3457 
3458   CodeBuffer buffer(name, 1000, 512);
3459   MacroAssembler* masm                = new MacroAssembler(&buffer);
3460 
3461   int frame_size_words;
3462   enum frame_layout {
3463                 thread_off,
3464                 extra_words };
3465 
3466   OopMapSet *oop_maps = new OopMapSet();
3467   OopMap* map = NULL;
3468 
3469   int start = __ offset();
3470 
3471   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
3472 
3473   int frame_complete = __ offset();
3474 
3475   const Register thread = rdi;
3476   __ get_thread(rdi);
3477 
3478   __ push(thread);
3479   __ set_last_Java_frame(thread, noreg, rbp, NULL);
3480 
3481   __ call(RuntimeAddress(destination));
3482 
3483 
3484   // Set an oopmap for the call site.
3485   // We need this not only for callee-saved registers, but also for volatile
3486   // registers that the compiler might be keeping live across a safepoint.
3487 
3488   oop_maps->add_gc_map( __ offset() - start, map);
3489 
3490   // rax, contains the address we are going to jump to assuming no exception got installed
3491 
3492   __ addptr(rsp, wordSize);
3493 
3494   // clear last_Java_sp
3495   __ reset_last_Java_frame(thread, true, false);
3496   // check for pending exceptions
3497   Label pending;
3498   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3499   __ jcc(Assembler::notEqual, pending);
3500 
3501   // get the returned Method*
3502   __ get_vm_result_2(rbx, thread);
3503   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
3504 
3505   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
3506 
3507   RegisterSaver::restore_live_registers(masm);
3508 
3509   // We are back to the original state on entry and ready to go.
3510 
3511   __ jmp(rax);
3512 
3513   // Pending exception after the safepoint
3514 
3515   __ bind(pending);
3516 
3517   RegisterSaver::restore_live_registers(masm);
3518 
3519   // exception pending => remove activation and forward to exception handler
3520 
3521   __ get_thread(thread);
3522   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
3523   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
3524   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3525 
3526   // -------------
3527   // make sure all code is generated
3528   masm->flush();
3529 
3530   // return the blob
3531   // frame_size_words or bytes??
3532   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3533 }
--- EOF ---