/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  // Capture info about frame layout
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords,
                st0_off, st0H_off,
                st1_off, st1H_off,
                st2_off, st2H_off,
                st3_off, st3H_off,
                st4_off, st4H_off,
                st5_off, st5H_off,
                st6_off, st6H_off,
                st7_off, st7H_off,
                xmm_off,
                DEF_XMM_OFFS(0),
                DEF_XMM_OFFS(1),
                DEF_XMM_OFFS(2),
                DEF_XMM_OFFS(3),
                DEF_XMM_OFFS(4),
                DEF_XMM_OFFS(5),
                DEF_XMM_OFFS(6),
                DEF_XMM_OFFS(7),
                flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
                rdi_off,
                rsi_off,
                ignore_off,  // extra copy of rbp
                rsp_off,
                rbx_off,
                rdx_off,
                rcx_off,
                rax_off,
                // The frame sender code expects that rbp will be in the "natural" place and
                // will override any oopMap setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                rbp_off,
                return_off,      // slot for return address
                reg_save_size };
  enum { FPU_regs_live = flags_off - fpu_state_end };
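
  // Rough picture of the save area the enum above describes, in 4-byte words
  // from low to high addresses (rsp points at fpu_state_off once
  // save_live_registers has pushed everything):
  //
  //   [FPU env (27 words)] [st0..st7 as doubles] [xmm0..xmm7, 16 bytes each]
  //   [align pad] [flags] [rdi rsi rbp-copy rsp rbx rdx rcx rax (pusha)]
  //   [rbp] [return address]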

  public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                     int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  static int rax_offset() { return rax_off; }
  static int rbx_offset() { return rbx_off; }

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int raxOffset(void) { return rax_off; }
  static int rdxOffset(void) { return rdx_off; }
  static int rbxOffset(void) { return rbx_off; }
  static int xmm0Offset(void) { return xmm0_off; }
  // This really returns a slot in the fp save area; which one is not important
  static int fpResultOffset(void) { return st0_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                           int* total_frame_words, bool verify_fpu, bool save_vectors) {
  int vect_words = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
#ifdef COMPILER2
  if (save_vectors) {
    assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
    assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
    // Save upper half of ZMM/YMM registers:
    vect_words = 8 * 16 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif
  int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
  int frame_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_words;

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // Save registers, fpu state, and flags.
  // We assume the caller already has the return address slot on the stack.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter, and we want to
  // use pusha; so we push everything by hand.
  __ enter();
  __ pusha();
  __ pushf();
  __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
  __ push_FPU_state();          // Save FPU state & init
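
  // At this point rsp matches fpu_state_off in the layout above: below the
  // return address we have pushed rbp (enter), eight GPRs (pusha), the flags
  // word (pushf), FPU_regs_live words of spill space, and FPUStateSizeInWords
  // of FPU environment; together with the return address slot that is
  // reg_save_size words.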

  if (verify_fpu) {
    // Some stubs may have non-standard FPU control word settings so
    // only check and reset the value when it is required to be the
    // standard value.  The safepoint blob in particular can be used
    // in methods which are using the 24 bit control word for
    // optimized float math.

#ifdef ASSERT
    // Make sure the control word has the expected value
    Label ok;
    __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
    __ jccb(Assembler::equal, ok);
    __ stop("corrupted control word detected");
    __ bind(ok);
#endif

    // Reset the control word to guard against exceptions being unmasked
    // since fstp_d can cause FPU stack underflow exceptions.  Write it
    // into the on-stack copy and then reload that to make sure that the
    // current and future values are correct.
    __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
  }

  __ frstor(Address(rsp, 0));
  if (!verify_fpu) {
    // Set the control word so that exceptions are masked for the
    // following code.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  int off = st0_off;
  int delta = st1_off - off;

  // Save the FPU registers in de-opt-able form
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    __ fstp_d(Address(rsp, off*wordSize));
    off += delta;
  }
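
  // Each fstp_d stores st(0) as a 64-bit double and pops the x87 stack, so
  // this loop walks all eight stack registers. The frstor above reloaded the
  // state saved by push_FPU_state, which is what makes the live values
  // available to be stored here.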

  off = xmm0_off;
  delta = xmm1_off - off;
  if(UseSSE == 1) {           // Save the XMM state
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
      off += delta;
    }
  } else if(UseSSE >= 2) {
    // Save whole 128-bit (16 bytes) XMM registers
    if (VM_Version::supports_avx512novl()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf32x4h(Address(rsp, off*wordSize), as_XMMRegister(n), 0);
        off += delta;
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
        off += delta;
      }
    }
  }

  if (vect_words > 0) {
    assert(vect_words*wordSize == 128, "");
    __ subptr(rsp, 128); // Save upper half of YMM registers
    off = 0;
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
    }
    if (UseAVX > 2) {
      __ subptr(rsp, 256); // Save upper half of ZMM registers
      off = 0;
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf64x4h(Address(rsp, off++*32), as_XMMRegister(n));
      }
    }
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap( frame_words, 0 );

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
#define NEXTREG(x) (x)->as_VMReg()->next()

  map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
  // rbp location is known implicitly, no oopMap
  map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
  // %%% This is really a waste but we'll keep things as they were for now for the upper component
  off = st0_off;
  delta = st1_off - off;
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    FloatRegister freg_name = as_FloatRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
    off += delta;
  }
  off = xmm0_off;
  delta = xmm1_off - off;
  for (int n = 0; n < num_xmm_regs; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
    off += delta;
  }
#undef NEXTREG
#undef STACK_OFFSET

  return map;
}
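
// A minimal usage sketch (the real call sites are in the blob generators
// later in this file): save, make the map visible at the call site, call
// into the VM, then restore.
//
//   int frame_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_words);
//   // ... set up last_Java_frame, emit the VM call, register 'map' ...
//   RegisterSaver::restore_live_registers(masm);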

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  // Recover XMM & FPU state
  int additional_frame_bytes = 0;
#ifdef COMPILER2
  if (restore_vectors) {
    assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
    assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
    additional_frame_bytes = 128;
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif
  int off = xmm0_off;
  int delta = xmm1_off - off;

  if (UseSSE == 1) {
    assert(additional_frame_bytes == 0, "");
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
      off += delta;
    }
  } else if (UseSSE >= 2) {
    if (VM_Version::supports_avx512novl()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf32x4h(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes), 0);
        off += delta;
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
        off += delta;
      }
    }
  }
  if (restore_vectors) {
    if (UseAVX > 2) {
      off = 0;
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, off++*32));
      }
      __ addptr(rsp, additional_frame_bytes*2); // Pop the ZMM register save area
    }
    // Restore upper half of YMM registers.
    assert(additional_frame_bytes == 128, "");
    off = 0;
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
    }
    __ addptr(rsp, additional_frame_bytes); // Pop the YMM register save area
  }
  __ pop_FPU_state();
  __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers

  __ popf();
  __ popa();
  // Get the rbp described implicitly by the frame sender code (no oopMap)
  __ pop(rbp);

}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-saved register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.

  __ frstor(Address(rsp, 0));      // Restore fpu state

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
  }
  __ movptr(rax, Address(rsp, rax_off*wordSize));
  __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_off * wordSize);
}

// Is the vector's size (in bytes) bigger than a size saved by default?
// 16-byte XMM registers are saved by default using SSE2 movdqu instructions.
// Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
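
// For example, with 4-byte stack slots, reg2offset_in(slot 0) == 8: the first
// incoming stack argument sits just past the saved rbp and return address in
// the caller's frame. reg2offset_out(slot 0) is out_preserve_stack_slots()
// slots above rsp (that function is defined later in this file).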

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Registers
// 0 through RegisterImpl::number_of_registers-1 are the 32-bit
// integer registers.

// Pass the first two oop/int args in registers ECX and EDX.
// Pass the first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

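// Worked example of the convention below (assuming UseSSE >= 2): for a
// signature (int, double, float, long), the int goes in ECX, the double in
// XMM0, the float in XMM1, and the long in the first two stack slots;
// java_calling_convention then returns 2 (rounded to an even slot count).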

// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass the first two oop/int args in registers ECX and EDX.
// Pass the first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  uint    stack = 0;          // Starting stack position for args on stack


  // Pass the first two oop/int args in registers ECX and EDX.
  uint reg_arg0 = 9999;
  uint reg_arg1 = 9999;

  // Pass the first two float/double args in registers XMM0 and XMM1.
  // Doubles have precedence, so if you pass a mix of floats and doubles
  // the doubles will grab the registers before the floats will.
  // CNC - TURNED OFF FOR non-SSE.
  //       On Intel we have to round all doubles (and most floats) at
  //       call sites by storing to the stack in any case.
  // UseSSE=0 ==> Don't Use ==> 9999+0
  // UseSSE=1 ==> Floats only ==> 9999+1
  // UseSSE>=2 ==> Floats or doubles ==> 9999+2
  enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
  uint fargs = (UseSSE>=2) ? 2 : UseSSE;
  uint freg_arg0 = 9999+fargs;
  uint freg_arg1 = 9999+fargs;

  // Pass doubles & longs aligned on the stack.  First count stack slots for doubles and longs.
  int i;
  for( i = 0; i < total_args_passed; i++) {
    if( sig_bt[i] == T_DOUBLE ) {
      // first 2 doubles go in registers
      if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
      else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
      else // Else double is passed low on the stack to be aligned.
        stack += 2;
    } else if( sig_bt[i] == T_LONG ) {
      stack += 2;
    }
  }
  int dstack = 0;             // Separate counter for placing doubles

  // Now pick where all else goes.
  for( i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if( reg_arg0 == 9999 )  {
        reg_arg0 = i;
        regs[i].set1(rcx->as_VMReg());
      } else if( reg_arg1 == 9999 )  {
        reg_arg1 = i;
        regs[i].set1(rdx->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_FLOAT:
      if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
        freg_arg0 = i;
        regs[i].set1(xmm0->as_VMReg());
      } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
        freg_arg1 = i;
        regs[i].set1(xmm1->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(dstack));
      dstack += 2;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      if( freg_arg0 == (uint)i ) {
        regs[i].set2(xmm0->as_VMReg());
      } else if( freg_arg1 == (uint)i ) {
        regs[i].set2(xmm1->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(dstack));
        dstack += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
  return round_to(stack, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee.
  // rax isn't live so capture the return address while we easily can.
  __ movptr(rax, Address(rsp, 0));
  __ pusha();
  __ pushf();

  if (UseSSE == 1) {
    __ subptr(rsp, 2*wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ movflt(Address(rsp, wordSize), xmm1);
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 4*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ movdbl(Address(rsp, 2*wordSize), xmm1);
  }
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // VM needs caller's callsite
  __ push(rax);
  // VM needs target method
  __ push(rbx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ addptr(rsp, 2*wordSize);

  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
    __ movflt(xmm1, Address(rsp, wordSize));
    __ addptr(rsp, 2*wordSize);
  }
  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
    __ movdbl(xmm1, Address(rsp, 2*wordSize));
    __ addptr(rsp, 4*wordSize);
  }

  __ popf();
  __ popa();
  __ bind(L);
}


static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
  int next_off = st_off - Interpreter::stackElementSize;
  __ movdbl(Address(rsp, next_off), r);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Since all args are passed on the stack,
  // total_args_passed * Interpreter::stackElementSize is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ movptr(rsi, rsp);

  __ subptr(rsp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   12 T_LONG
    // 1    8 T_VOID
    // 2    4 T_OBJECT
    // 3    0 T_BOOLEAN
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {
      // memory to memory; use rdi as a temporary
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;

      if (!r_2->is_valid()) {
        __ movl(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rdi);
      } else {

        // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
        // st_off == MSW, st_off-wordSize == LSW

        __ movptr(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
        __ movptr(rdi, Address(rsp, ld_off + wordSize));
        __ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ movl(Address(rsp, st_off), r);
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG;
        // T_DOUBLE and T_LONG use two slots in the interpreter.
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movptr(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
        move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  // And repush original return address
  __ push(rax);
  __ jmp(rcx);
}


static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
  int next_val_off = ld_off - Interpreter::stackElementSize;
  __ movdbl(r, Address(saved_sp, next_val_off));
}

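// Range-check helper: jumps to L_ok when code_start < pc_reg < code_end;
// otherwise it falls through, so the caller emits its failure handling
// (e.g. a stop) right after the call and binds L_ok beyond it.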
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, rdi,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(rdi, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ andptr(rsp, -(StackAlignmentInBytes));

  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, rdi);


  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use rsi as a temp here because compiled code doesn't need rsi as an input
      // and if we end up going through a c2i adapter because of a miss, a reasonable
      // value of rsi will be generated.
      if (!r_2->is_valid()) {
        // __ fld_s(Address(saved_sp, ld_off));
        // __ fstp_s(Address(rsp, st_off));
        __ movl(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), rsi);
      } else {
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed at negative offsets, so the LSW is at the LOW address.
        //
        // ld_off is the MSW, so get the LSW;
        // st_off is the LSW (i.e. reg.first()).
        // __ fld_d(Address(saved_sp, next_off));
        // __ fstp_d(Address(rsp, st_off));
        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
#ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off + wordSize), rsi);
#endif // _LP64
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
        assert(r_2->as_Register() != rax, "need another temporary register");
        // Remember r_1 is low address (and LSB on x86)
        // So r_2 gets loaded from high address regardless of the platform
        __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
      } else {
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ get_thread(rax);
  __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);

  // Move Method* to rax in case we end up in a c2i adapter.
  // The c2i adapters expect Method* in rax (c2) because c2's
  // resolve stubs return the result (the method) in rax.
  // I'd love to fix this.
  __ mov(rax, rbx);

  __ jmp(rdi);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not EBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register holder = rax;
  Register receiver = rcx;
  Register temp = rbx;

  {

    Label missed;
    __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::notEqual, missed);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);

    __ bind(missed);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  uint    stack = 0;        // All arguments on stack

  for( int i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_FLOAT:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      regs[i].set1(VMRegImpl::stack2reg(stack++));
      break;
    case T_LONG:
    case T_DOUBLE: // The stack numbering is reversed from Java
      // Since C arguments do not get reversed, the ordering for
      // doubles on the stack must be opposite the Java convention
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(stack));
      stack += 2;
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
  return stack;
}
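
// For example, a signature (int, long, float) is laid out by
// c_calling_convention as: int in slot 0, long in slots 1-2, float in
// slot 3; the function returns 4, the total number of slots reserved.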

// A simple move of an integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // no need to sign extend on 64bit
    __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Because of the calling conventions we know that src can be a
  // register or a stack location. dst can only be a stack location.

  assert(dst.first()->is_stack(), "must be stack");
  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = rax;
    Label nil;
    __ xorptr(rHandle, rHandle);
    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, nil);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    __ bind(nil);
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);

    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserved
    // on the stack for oop_handles
    const Register rOop = src.first()->as_Register();
    const Register rHandle = rax;
    int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ movptr(Address(rsp, offset), rOop);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorptr(rHandle, rHandle);
    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip);
    __ lea(rHandle, Address(rsp, offset));
    __ bind(skip);
    // Store the handle parameter
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}

// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // Because of the calling convention we know that src is either a stack location
  // or an xmm register. dst can only be a stack location.

  assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");

  if (src.first()->is_stack()) {
    __ movl(rax, Address(rbp, reg2offset_in(src.first())));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  } else {
    // reg to stack
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibility for a long_move VMRegPair is:
  // 1: two stack slots (possibly unaligned)
  // as neither the Java nor the C calling convention will use registers
  // for longs.

  if (src.first()->is_stack() && dst.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    ShouldNotReachHere();
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Because of the calling convention we know that src is either
  //   1: a single physical register (xmm registers only)
  //   2: two stack slots (possibly unaligned)
  // dst can only be a pair of stack slots.

  assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");

  if (src.first()->is_stack()) {
    // source is all stack
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    // reg to stack
    // No worries about stack alignment
    __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fstp_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fstp_d(Address(rbp, -2*wordSize));
    break;
  case T_VOID:  break;
  case T_LONG:
    __ movptr(Address(rbp, -wordSize), rax);
    NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
    break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fld_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fld_d(Address(rbp, -2*wordSize));
    break;
  case T_LONG:
    __ movptr(rax, Address(rbp, -wordSize));
    NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int handle_index = 0;
  // Save down double words first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_regs[i].second()->is_Register()) {
          __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
        }
      } else {
        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
        if (in_regs[i].second()->is_Register()) {
          __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
        }
      }
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      assert(handle_index <= stack_slots, "overflow");
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        map->set_oop(VMRegImpl::stack2reg(slot));
      }

      // Value is in an input register; we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (map != NULL) {
            __ movptr(Address(rsp, offset), reg);
          } else {
            __ movptr(reg, Address(rsp, offset));
          }
          break;
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
        int offset = slot * VMRegImpl::stack_slot_size;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
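
// This helper is called in matched pairs around a runtime call (see
// check_needs_gc_for_critical_native below): once with a non-NULL map to
// spill the argument registers and record any oops in the OopMap, and once
// with a NULL map to reload them afterwards.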

// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               Register thread,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(thread, rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ push(thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ increment(rsp, wordSize);

  __ get_thread(thread);
  __ reset_last_Java_frame(thread, false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
1359 
1360 // Unpack an array argument into a pointer to the body and the length
1361 // if the array is non-null, otherwise pass 0 for both.
1362 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1363   Register tmp_reg = rax;
1364   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1365          "possible collision");
1366   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1367          "possible collision");
1368 
1369   // Pass the length, ptr pair
1370   Label is_null, done;
1371   VMRegPair tmp(tmp_reg->as_VMReg());
1372   if (reg.first()->is_stack()) {
1373     // Load the arg up from the stack
1374     simple_move32(masm, reg, tmp);
1375     reg = tmp;
1376   }
1377   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1378   __ jccb(Assembler::equal, is_null);
1379   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1380   simple_move32(masm, tmp, body_arg);
1381   // load the length relative to the body.
1382   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1383                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1384   simple_move32(masm, tmp, length_arg);
1385   __ jmpb(done);
1386   __ bind(is_null);
1387   // Pass zeros
1388   __ xorptr(tmp_reg, tmp_reg);
1389   simple_move32(masm, tmp, body_arg);
1390   simple_move32(masm, tmp, length_arg);
1391   __ bind(done);
1392 }
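
// For example (hypothetical critical native): with the unpacking above, a
// Java method declared
//     static native int sum(byte[] data);
// is invoked as a C function of the form
//     jint JavaCritical_pkg_Cls_sum(jint length, jbyte* body);
// and a null array arrives as length == 0 and body == NULL.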

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = rbx;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = rbx;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = rcx;  // known to be free at this point
      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, because it's impossible
// for them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
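  // For example (hypothetical): a non-static native with a single int
  // parameter has total_c_args == 2 (the JNIEnv* plus the jint), and making
  // it static adds a jclass for 3; a critical native instead gains one extra
  // slot per array parameter, since each array becomes a (length, body) pair.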

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i]  = T_BYTE; break;
            case 'C': in_elem_bt[i]  = T_CHAR; break;
            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
            case 'F': in_elem_bt[i]  = T_FLOAT; break;
            case 'I': in_elem_bt[i]  = T_INT; break;
            case 'J': in_elem_bt[i]  = T_LONG; break;
            case 'S': in_elem_bt[i]  = T_SHORT; break;
            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }
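  // A worked example (hypothetical): for a critical native with signature
  // ([BI)V the loop above yields
  //   in_elem_bt = { T_BYTE, T_VOID }
  //   out_sig_bt = { T_INT, T_ADDRESS, T_INT }
  // i.e. the byte[] becomes a length/body pair and the int passes through.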

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers: a max of 2 on x86.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:  // critical array (uses 2 slots on LP64)
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_XMMRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = round_to(stack_slots, 2);
    }
  }
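  // For example (hypothetical): two int args in registers plus one double in
  // an XMM register would give single_slots == 2 and double_slots == 1, so
  // total_save_slots == 4 and stack_slots is rounded to an even slot count so
  // the save area starts 8-byte aligned.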

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 2 for return address (which we own) and saved rbp
  stack_slots += 4;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (a max of 2 registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //
  // ****************************************************************************
  // WARNING - on Windows Java Natives use pascal calling convention and pop the
  // arguments off of the stack after the jni call. Before the call we can use
  // instructions that are SP relative. After the jni call we switch to FP
  // relative instructions instead of re-adjusting the stack on Windows.
  // ****************************************************************************


  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
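  // For example: with 16-byte stack alignment (StackAlignmentInSlots == 4),
  // 27 slots round up to 28, giving stack_size == 112 bytes.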

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rax;
  const Register receiver = rcx;
  Label hit;
  Label exception_pending;

  __ verify_oop(receiver);
  __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // The verified entry must be aligned for code patching, and the first
  // 5 bytes must be in the same cache line. If we align at 8 then we
  // are sure the 5 bytes are in the same line.
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver = rcx;
    Register result = rax;
    __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));

    // check if locked
    __ testptr(result, markOopDesc::unlocked_value);
    __ jcc (Assembler::zero, slowCase);

    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ testptr(result, markOopDesc::biased_lock_bit_in_place);
      __ jcc (Assembler::notZero, slowCase);
    }

    // get hash
    __ andptr(result, markOopDesc::hash_mask_in_place);
    // test if hashCode exists
    __ jcc  (Assembler::zero, slowCase);
    __ shrptr(result, markOopDesc::hash_shift);
    __ ret(0);
    __ bind (slowCase);
  }
#endif // COMPILER1

  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check

  if (UseStackBanging) {
    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
  } else {
    // need a 5 byte instruction to allow MT safe patching to non-entrant
    __ fat_nop();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rbp
  __ subptr(rsp, stack_size - 2*wordSize);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  if (UseRTMLocking) {
    // Abort RTM transaction before calling JNI
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

  // Calculate the difference between rsp and rbp. We need to know it
  // after the native call because on Windows Java Natives will pop
  // the arguments and it is painful to do rsp relative addressing
  // in a platform independent way. So after the call we switch to
  // rbp-relative addressing.

  int fp_adjustment = stack_size - 2*wordSize;

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Compute the rbp offset for any slots used after the jni call

  int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;

  // We use rdi as a thread pointer because it is callee save and
  // if we load it once it is usable through the entire wrapper
  const Register thread = rdi;

  // We use rsi as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = rsi;

  __ get_thread(thread);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // and, if static, the class mirror instead of a receiver.  This pretty much
  // guarantees that register layout will not match (and x86 doesn't use reg
  // parms though amd64 does).  Since the native abi doesn't use register args
  // but the java convention does, we don't have to worry about collisions.
  // All of our moves are reg->stack or stack->stack.
  // We ignore the extra arguments during the shuffle and handle them at the
  // last moment. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.

  int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
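  // For example: for a static non-critical native the first java argument is
  // shuffled into out_regs[2], past the JNIEnv* and class-mirror slots.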

  // Record rsp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);

  // Mark location of rbp
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());

  // We know that we only have args in at most two integer registers (rcx, rdx),
  // so rax and rbx are free to use as temporaries if we have to do stack to stack moves.
  // All inbound args are referenced based on rbp and all outbound args via rsp.

  for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
          break;
        }
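        // otherwise fall through: a non-critical T_ARRAY is an ordinary oop
        // and is handled by the T_OBJECT case below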
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        simple_move32(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into rsi.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {

    // load oop into a register
    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

    // Now handlize the static class mirror; it's known to be not-null.
    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(oop_handle_reg, Address(rsp, klass_offset));
    // store the klass handle as second argument
    __ movptr(Address(rsp, wordSize), oop_handle_reg);
  }
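
  // (A JNI "handle" here is just the address of the rsp-relative slot that
  // holds the mirror oop; the oopMap entry above keeps the mirror itself
  // visible to GC while the native code sees only the slot's address.)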

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);


  // We have all of the arguments set up at this point. We must not touch any
  // argument registers from here on (if we were to save/restore them there
  // would be no oopMap describing the oops they hold).

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         thread, rax);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         thread, rax);
  }

  // These are register definitions we need for locking/unlocking
  const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
  const Register obj_reg  = rcx;  // Will contain the oop
  const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)

  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box

    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      // Note that oop_handle_reg is trashed during this call
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax
    __ movptr(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    __ orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    if (os::is_MP()) {
      __ lock();
    }

    // src -> dest iff dest == rax else rax <- dest
    // *obj_reg = lock_reg iff *obj_reg == rax else rax = *(obj_reg)
    __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
    __ jcc(Assembler::equal, lock_done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < rsp + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
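    // For example, assuming a 4096-byte page: 3 - 4096 == -4093 == 0xfffff003,
    // so the andptr below yields zero exactly when 0 <= (mark - rsp) < 4096
    // with the low two bits of the difference clear, i.e. the displaced mark
    // is a stack lock within our own frame.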

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result; for the recursive case the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }


  // Finally just about ready to make the JNI call


  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);
  }

  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // WARNING - on Windows Java Natives use pascal calling convention and pop the
  // arguments off of the stack. We could just re-adjust the stack pointer here
  // and continue to do SP relative addressing but we instead switch to FP
  // relative addressing.

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
  case T_BYTE   : __ sign_extend_byte (rax); break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in st0; we'll save as needed
    break;
  case T_ARRAY:                 // Really a handle
  case T_OBJECT:                // Really a handle
      break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default       : ShouldNotReachHere();
  }
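  // For example, a native jboolean result only defines the low byte of rax:
  // c2bool masks to that byte and normalizes it to 0 or 1, and a jchar result
  // is zero-extended to 16 bits, so compiled callers always see a value in
  // the range the Java type promises.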

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  Label after_transition;

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    // We also can't use call_VM_leaf as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ push(thread);
    if (!is_critical_native) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                              JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                              JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ increment(rsp, wordSize);
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ jmpb(after_transition);
    }

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
  __ jcc(Assembler::equal, reguard);

  // slow path reguard re-enters here
  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    // Simple recursive lock?

    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);

    // Must save rax if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get old displaced header
    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

    // get address of the stack lock
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));

    // Atomic swap old header if oop still contains the stack lock
    if (os::is_MP()) {
      __ lock();
    }

    // src -> dest iff dest == rax else rax <- dest
    // *obj_reg = rbx iff *obj_reg == rax else rax = *(obj_reg)
    __ cmpxchgptr(rbx, Address(obj_reg, 0));
    __ jcc(Assembler::notEqual, slow_path_unlock);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // We can finally stop using that last_Java_frame we set up ages ago

  __ reset_last_Java_frame(thread, false, true);

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
      Label L;
      __ cmpptr(rax, (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ movptr(rax, Address(rax, 0));
      __ bind(L);
      __ verify_oop(rax);
  }

  if (!is_critical_native) {
    // reset handle block
    __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

    // Any exception pending?
    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, exception_pending);
  }

  // no exception, we're almost done

  // check that only result value is on FPU stack
  __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");

  // Fix up floating point results so that the result looks like a return from a compiled method
  if (ret_type == T_FLOAT) {
    if (UseSSE >= 1) {
      // Pop st0 and store as float and reload into xmm register
      __ fstp_s(Address(rbp, -4));
      __ movflt(xmm0, Address(rbp, -4));
    }
  } else if (ret_type == T_DOUBLE) {
    if (UseSSE >= 2) {
      // Pop st0 and store as double and reload into xmm register
      __ fstp_d(Address(rbp, -8));
      __ movdbl(xmm0, Address(rbp, -8));
    }
  }

  // Return

  __ leave();
  __ ret(0);

  // Unexpected paths are out of line and go here

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock

    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions so do a vanilla call, not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ push(lock_reg);
    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
    __ addptr(rsp, 3*wordSize);

#ifdef ASSERT
    { Label L;
    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ stop("no pending exception allowed on exit from monitorenter");
    __ bind(L);
    }
#endif
    __ jmp(lock_done);

    // END Slow path lock

    // BEGIN Slow path unlock
    __ bind(slow_path_unlock);

    // Slow path unlock

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      save_native_result(masm, ret_type, stack_slots);
    }
    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)

    __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);


    // +wordSize because of the push above
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
    __ push(rax);

    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
    __ addptr(rsp, 3*wordSize);
#ifdef ASSERT
    {
      Label L;
      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ jmp(unlock_done);
    // END Slow path unlock

  }

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  }
  restore_native_result(masm, ret_type, stack_slots);
  __ jmp(reguard_done);


  // BEGIN EXCEPTION PROCESSING

  if (!is_critical_native) {
    // Forward the exception
    __ bind(exception_pending);

    // remove possible return value from FPU register stack
    __ empty_FPU_stack();

    // pop our frame
    __ leave();
    // and forward the exception
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}

// This function returns the adjusted size (in number of words) of a c2i
// adapter activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}
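// For example: deoptimizing into a callee with 2 parameter words and 5 local
// words grows the interpreter activation by 3 stack elements (one word each
// here, as the interpreter stack is untagged).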
2412 
2413 
2414 uint SharedRuntime::out_preserve_stack_slots() {
2415   return 0;
2416 }
2417 
2418 //------------------------------generate_deopt_blob----------------------------
2419 void SharedRuntime::generate_deopt_blob() {
2420   // allocate space for the code
2421   ResourceMark rm;
2422   // setup code generation tools
2423   // note: the buffer code size must account for StackShadowPages=50
2424   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2425   MacroAssembler* masm = new MacroAssembler(&buffer);
2426   int frame_size_in_words;
2427   OopMap* map = NULL;
2428   // Account for the extra args we place on the stack
2429   // by the time we call fetch_unroll_info
2430   const int additional_words = 2; // deopt kind, thread
2431 
2432   OopMapSet *oop_maps = new OopMapSet();
2433 
2434   // -------------
2435   // This code enters when returning to a de-optimized nmethod.  A return
2436   // address has been pushed on the the stack, and return values are in
2437   // registers.
2438   // If we are doing a normal deopt then we were called from the patched
2439   // nmethod from the point we returned to the nmethod. So the return
2440   // address on the stack is wrong by NativeCall::instruction_size
2441   // We will adjust the value to it looks like we have the original return
2442   // address on the stack (like when we eagerly deoptimized).
2443   // In the case of an exception pending with deoptimized then we enter
2444   // with a return address on the stack that points after the call we patched
2445   // into the exception handler. We have the following register state:
2446   //    rax,: exception
2447   //    rbx,: exception handler
2448   //    rdx: throwing pc
2449   // So in this case we simply jam rdx into the useless return address and
2450   // the stack looks just like we want.
2451   //
2452   // At this point we need to de-opt.  We save the argument return
2453   // registers.  We call the first C routine, fetch_unroll_info().  This
2454   // routine captures the return values and returns a structure which
2455   // describes the current frame size and the sizes of all replacement frames.
2456   // The current frame is compiled code and may contain many inlined
2457   // functions, each with their own JVM state.  We pop the current frame, then
2458   // push all the new frames.  Then we call the C routine unpack_frames() to
2459   // populate these frames.  Finally unpack_frames() returns us the new target
2460   // address.  Notice that callee-save registers are BLOWN here; they have
2461   // already been captured in the vframeArray at the time the return PC was
2462   // patched.
2463   address start = __ pc();
2464   Label cont;
2465 
2466   // Prolog for non exception case!
2467 
2468   // Save everything in sight.
2469 
2470   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2471   // Normal deoptimization
2472   __ push(Deoptimization::Unpack_deopt);
2473   __ jmp(cont);
2474 
2475   int reexecute_offset = __ pc() - start;
2476 
2477   // Reexecute case
2478   // return address is the pc describes what bci to do re-execute at
2479 
2480   // No need to update map as each call to save_live_registers will produce identical oopmap
2481   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2482 
2483   __ push(Deoptimization::Unpack_reexecute);
2484   __ jmp(cont);
2485 
2486   int exception_offset = __ pc() - start;
2487 
2488   // Prolog for exception case
2489 
2490   // all registers are dead at this entry point, except for rax, and
2491   // rdx which contain the exception oop and exception pc
2492   // respectively.  Set them in TLS and fall thru to the
2493   // unpack_with_exception_in_tls entry point.
2494 
2495   __ get_thread(rdi);
2496   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2497   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2498 
2499   int exception_in_tls_offset = __ pc() - start;
2500 
2501   // new implementation because exception oop is now passed in JavaThread
2502 
2503   // Prolog for exception case
2504   // All registers must be preserved because they might be used by LinearScan
2505   // Exceptiop oop and throwing PC are passed in JavaThread
2506   // tos: stack at point of call to method that threw the exception (i.e. only
2507   // args are on the stack, no return address)
2508 
2509   // make room on stack for the return address
2510   // It will be patched later with the throwing pc. The correct value is not
2511   // available now because loading it from memory would destroy registers.
2512   __ push(0);
2513 
2514   // Save everything in sight.
2515 
2516   // No need to update map as each call to save_live_registers will produce identical oopmap
2517   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2518 
2519   // Now it is safe to overwrite any register
2520 
2521   // store the correct deoptimization type
2522   __ push(Deoptimization::Unpack_exception);
2523 
2524   // load throwing pc from JavaThread and patch it as the return address
2525   // of the current frame. Then clear the field in JavaThread
2526   __ get_thread(rdi);
2527   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2528   __ movptr(Address(rbp, wordSize), rdx);
2529   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2530 
2531 #ifdef ASSERT
2532   // verify that there is really an exception oop in JavaThread
2533   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2534   __ verify_oop(rax);
2535 
2536   // verify that there is no pending exception
2537   Label no_pending_exception;
2538   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2539   __ testptr(rax, rax);
2540   __ jcc(Assembler::zero, no_pending_exception);
2541   __ stop("must not have pending exception here");
2542   __ bind(no_pending_exception);
2543 #endif
2544 
2545   __ bind(cont);
2546 
2547   // Compiled code leaves the floating point stack dirty, empty it.
2548   __ empty_FPU_stack();
2549 
2550 
2551   // Call C code.  Need thread and this frame, but NOT official VM entry
2552   // crud.  We cannot block on this call, no GC can happen.
2553   __ get_thread(rcx);
2554   __ push(rcx);
2555   // fetch_unroll_info needs to call last_java_frame()
2556   __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2557 
2558   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2559 
2560   // Need to have an oopmap that tells fetch_unroll_info where to
2561   // find any register it might need.
2562 
2563   oop_maps->add_gc_map( __ pc()-start, map);
2564 
2565   // Discard arg to fetch_unroll_info
2566   __ pop(rcx);
2567 
2568   __ get_thread(rcx);
2569   __ reset_last_Java_frame(rcx, false, false);
2570 
2571   // Load UnrollBlock into EDI
2572   __ mov(rdi, rax);
2573 
2574   // Move the unpack kind to a safe place in the UnrollBlock because
2575   // we are very short of registers
2576 
2577   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2578   // retrieve the deopt kind from where we left it.
2579   __ pop(rax);
2580   __ movl(unpack_kind, rax);                      // save the unpack_kind value
2581 
2582    Label noException;
2583   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2584   __ jcc(Assembler::notEqual, noException);
2585   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2586   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2587   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2588   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2589 
2590   __ verify_oop(rax);
2591 
2592   // Overwrite the result registers with the exception results.
2593   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2594   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2595 
2596   __ bind(noException);
2597 
2598   // Stack is back to only having register save data on the stack.
2599   // Now restore the result registers. Everything else is either dead or captured
2600   // in the vframeArray.
2601 
2602   RegisterSaver::restore_result_registers(masm);
2603 
2604   // Non standard control word may be leaked out through a safepoint blob, and we can
2605   // deopt at a poll point with the non standard control word. However, we should make
2606   // sure the control word is correct after restore_result_registers.
2607   __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2608 
2609   // All of the register save area has been popped of the stack. Only the
2610   // return address remains.
2611 
2612   // Pop all the frames we must move/replace.
2613   //
2614   // Frame picture (youngest to oldest)
2615   // 1: self-frame (no frame link)
2616   // 2: deopting frame  (no frame link)
2617   // 3: caller of deopting frame (could be compiled/interpreted).
2618   //
2619   // Note: by leaving the return address of self-frame on the stack
2620   // and using the size of frame 2 to adjust the stack
2621   // when we are done the return to frame 3 will still be on the stack.
2622 
2623   // Pop deoptimized frame
2624   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2625 
2626   // sp should be pointing at the return address to the caller (3)
2627 
2628   // Pick up the initial fp we should save
2629   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2630   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2631 
2632 #ifdef ASSERT
2633   // Compilers generate code that bang the stack by as much as the
2634   // interpreter would need. So this stack banging should never
2635   // trigger a fault. Verify that it does not on non product builds.
2636   if (UseStackBanging) {
2637     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2638     __ bang_stack_size(rbx, rcx);
2639   }
2640 #endif
2641 
2642   // Load array of frame pcs into ECX
2643   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2644 
2645   __ pop(rsi); // trash the old pc
2646 
2647   // Load array of frame sizes into ESI
2648   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2649 
2650   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2651 
2652   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2653   __ movl(counter, rbx);
2654 
2655   // Now adjust the caller's stack to make up for the extra locals
2656   // but record the original sp so that we can save it in the skeletal interpreter
2657   // frame and the stack walking of interpreter_sender will get the unextended sp
2658   // value and not the "real" sp value.
2659 
2660   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2661   __ movptr(sp_temp, rsp);
2662   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2663   __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
#ifdef CC_INTERP
  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
#ifdef ASSERT
  __ push(0xDEADDEAD);                  // Make a recognizable pattern
  __ push(0xDEADDEAD);
#else /* ASSERT */
  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
          rbx); // Make it walkable
#else /* CC_INTERP */
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);               // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // save final return address
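  // At this point one skeletal frame per entry in the sizes/pcs arrays has
  // been pushed (return pc, saved rbp, and frame-size words of space), and the
  // final pc from the array sits on top so it becomes the return address of
  // the re-pushed self-frame below.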

  // Re-push self-frame
  __ enter();                           // save old & set new rbp,

  // Return address and rbp, are in place
  // We'll push additional args later. Just allocate a full sized
  // register save area
  __ subptr(rsp, (frame_size_in_words - additional_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
  __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
  if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
  if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
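  // Note: the fstp_d above always pops the x87 stack; when UseSSE >= 1 the
  // live result is in xmm0 and is captured by the movdbl/movflt store instead.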

  // Set up the args to unpack_frames

  __ pushl(unpack_kind);                     // push the unpack_kind value
  __ get_thread(rcx);
  __ push(rcx);

  // set last_Java_sp, last_Java_fp
  __ set_last_Java_frame(rcx, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));

  // rax, contains the return result type
  __ push(rax);

  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Collect return values
  __ movptr(rax, Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
  __ movptr(rdx, Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));

  // Clear floating point stack before returning to interpreter
  __ empty_FPU_stack();

  // Check if we should push the float or double return value.
  Label results_done, yes_double_value;
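  // The result-type tag (a BasicType, returned in rax and pushed after the
  // unpack_frames call above) is at (rsp, 0).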
  __ cmpl(Address(rsp, 0), T_DOUBLE);
  __ jcc (Assembler::zero, yes_double_value);
  __ cmpl(Address(rsp, 0), T_FLOAT);
  __ jcc (Assembler::notZero, results_done);

  // return float value as expected by interpreter
  if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
  __ jmp(results_done);

  // return double value as expected by interpreter
  __ bind(yes_double_value);
  if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));

  __ bind(results_done);

  // Pop self-frame.
  __ leave();                              // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}


#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  enum frame_layout {
    arg0_off,      // thread                     sp + 0 // Arg location for
    arg1_off,      // unloaded_class_index       sp + 1 // calling C
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,       // callee saved register      sp + 2
    return_off,    // slot for return address    sp + 3
    framesize
  };
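  // Resulting self-frame, youngest slot first: the two outgoing C args
  // (thread, unloaded_class_index), the saved rbp, and the return address.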

  address start = __ pc();

  if (UseRTMLocking) {
    // Abort RTM transaction before possible nmethod deoptimization.
    __ xabort(0);
  }

  // Push self-frame.
  __ subptr(rsp, return_off*wordSize);     // Prolog!

  // rbp, is an implicitly saved callee saved register (i.e. the calling
  // convention will save/restore it in prolog/epilog). Other than that
  // there are no callee save registers now that adapter frames are gone.
  __ movptr(Address(rsp, rbp_off*wordSize), rbp);

  // Clear the floating point exception stack
  __ empty_FPU_stack();

  // set last_Java_sp
  __ get_thread(rdx);
  __ set_last_Java_frame(rdx, noreg, noreg, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  __ movptr(Address(rsp, arg0_off*wordSize), rdx);
  // argument already in ECX
  __ movl(Address(rsp, arg1_off*wordSize), rcx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
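  // uncommon_trap returns the UnrollBlock* in rax; it is moved to rdi below
  // once the oopmap has been recorded and last_Java_frame has been reset.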

  // Set an oopmap for the call site
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap( framesize, 0 );
  // No oopMap for rbp, it is known implicitly

  oop_maps->add_gc_map( __ pc()-start, map);

  __ get_thread(rcx);

  __ reset_last_Java_frame(rcx, false, false);

  // Load UnrollBlock into EDI
  __ movptr(rdi, rax);

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
  __ addptr(rsp, (framesize-1)*wordSize);     // Epilog!

  // Pop deoptimized frame
  __ movl2ptr(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  __ addptr(rsp, rcx);

  // sp should be pointing at the return address to the caller (3)

  // Pick up the initial fp we should save
  // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }
#endif

  // Load array of frame pcs into ECX
  __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  __ pop(rsi); // trash the pc

  // Load array of frame sizes into ESI
  __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());

  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
  __ movl(counter, rbx);

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.

  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
  __ movptr(sp_temp, rsp);
  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
#ifdef CC_INTERP
  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
#ifdef ASSERT
  __ push(0xDEADDEAD);                  // Make a recognizable pattern
  __ push(0xDEADDEAD);                  // (parm to RecursiveInterpreter...)
#else /* ASSERT */
  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
          rbx); // Make it walkable
#else /* CC_INTERP */
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);               // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // save final return address

  // Re-push self-frame
  __ enter();                           // save old & set new rbp,
  __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!

  // set last_Java_sp, last_Java_fp
  __ get_thread(rdi);
  __ set_last_Java_frame(rdi, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ movptr(Address(rsp, arg0_off*wordSize), rdi);
  __ movl(Address(rsp, arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );

  __ get_thread(rdi);
  __ reset_last_Java_frame(rdi, true, false);

  // Pop self-frame.
  __ leave();     // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// sets up an oopmap, and calls safepoint code to stop the compiled code for
// a safepoint.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {

  // Account for thread arg in our frame
  const int additional_words = 1;
  int frame_size_in_words;

  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // setup code generation tools
  CodeBuffer   buffer("handler_blob", 1024, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register java_thread = rdi; // callee-saved for VC++
  address start   = __ pc();
  address call_pc = NULL;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  if (UseRTMLocking) {
    // Abort RTM transaction before calling runtime
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

  // If cause_return is true we are at a poll_return and there is
  // the return address on the stack to the caller on the nmethod
  // that is safepointing. We can leave this return on the stack and
  // effectively complete the return and safepoint in the caller.
  // Otherwise we push space for a return address that the safepoint
  // handler will install later to make the stack walking sensible.
  if (!cause_return)
    __ push(rbx);  // Make room for return address (or push it again)

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  // Push thread argument and setup last_Java_sp
  __ get_thread(java_thread);
  __ push(java_thread);
  __ set_last_Java_frame(java_thread, noreg, noreg, NULL);

  // if this was not a poll_return then we need to correct the return address now.
  if (!cause_return) {
    __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), rax);
  }
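  // The store above patches (rbp, wordSize), the return-address slot of the
  // frame built by save_live_registers (one word above the saved rbp):
  // the pc at which the poll trapped replaces the dummy word pushed earlier.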

  // do the call
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  // Discard arg
  __ pop(rcx);

  Label noException;

  // Clear last_Java_sp again
  __ get_thread(java_thread);
  __ reset_last_Java_frame(java_thread, false, false);

  __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ bind(noException);

  // Normal exit, register restoring and exit
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(0);

  // make sure all code is generated
  masm->flush();

  // Fill out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_words;
  enum frame_layout {
    thread_off,
    extra_words
  };
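  // thread_off (== 0) is the slot for the thread argument pushed below;
  // extra_words (== 1) is the count of additional outgoing-arg words handed
  // to save_live_registers.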

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);

  int frame_complete = __ offset();

  const Register thread = rdi;
  __ get_thread(thread);

  __ push(thread);
  __ set_last_Java_frame(thread, noreg, rbp, NULL);

  __ call(RuntimeAddress(destination));
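  // On return rax holds the resolved destination (assuming no pending
  // exception); the Method* is retrieved from the thread's vm_result_2 below.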

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // rax, contains the address we are going to jump to assuming no exception got installed

  __ addptr(rsp, wordSize);   // discard the thread argument

  // clear last_Java_sp
  __ reset_last_Java_frame(thread, true, false);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned Method*
  __ get_vm_result_2(rbx, thread);
  __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
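  // rbx (Method*) and rax (destination) are written into the register save
  // area so that restore_live_registers below reloads them into the live
  // registers.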

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob (frame_size_words is in words)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}