1 /*
   2  * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/compiledICHolder.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/vframeArray.hpp"
  37 #include "vmreg_x86.inline.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_Runtime1.hpp"
  40 #endif
  41 #ifdef COMPILER2
  42 #include "opto/runtime.hpp"
  43 #endif
  44 
  45 #define __ masm->
  46 
  47 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  48 
  49 class RegisterSaver {
  50   // Capture info about frame layout
  51 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  52   enum layout {
  53                 fpu_state_off = 0,
  54                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  55                 st0_off, st0H_off,
  56                 st1_off, st1H_off,
  57                 st2_off, st2H_off,
  58                 st3_off, st3H_off,
  59                 st4_off, st4H_off,
  60                 st5_off, st5H_off,
  61                 st6_off, st6H_off,
  62                 st7_off, st7H_off,
  63                 xmm_off,
  64                 DEF_XMM_OFFS(0),
  65                 DEF_XMM_OFFS(1),
  66                 DEF_XMM_OFFS(2),
  67                 DEF_XMM_OFFS(3),
  68                 DEF_XMM_OFFS(4),
  69                 DEF_XMM_OFFS(5),
  70                 DEF_XMM_OFFS(6),
  71                 DEF_XMM_OFFS(7),
  72                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  73                 rdi_off,
  74                 rsi_off,
  75                 ignore_off,  // extra copy of rbp,
  76                 rsp_off,
  77                 rbx_off,
  78                 rdx_off,
  79                 rcx_off,
  80                 rax_off,
  81                 // The frame sender code expects that rbp will be in the "natural" place and
  82                 // will override any oopMap setting for it. We must therefore force the layout
  83                 // so that it agrees with the frame sender code.
  84                 rbp_off,
  85                 return_off,      // slot for return address
  86                 reg_save_size };
  87   enum { FPU_regs_live = flags_off - fpu_state_end };
  88 
  89   public:
  90 
  91   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
  92                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  93   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
  94 
  95   static int rax_offset() { return rax_off; }
  96   static int rbx_offset() { return rbx_off; }
  97 
  98   // Offsets into the register save area
  99   // Used by deoptimization when it is managing result register
 100   // values on its own
 101 
 102   static int raxOffset(void) { return rax_off; }
 103   static int rdxOffset(void) { return rdx_off; }
 104   static int rbxOffset(void) { return rbx_off; }
 105   static int xmm0Offset(void) { return xmm0_off; }
 106   // This really returns a slot in the fp save area, which one is not important
 107   static int fpResultOffset(void) { return st0_off; }
 108 
 109   // During deoptimization only the result register need to be restored
 110   // all the other values have already been extracted.
 111 
 112   static void restore_result_registers(MacroAssembler* masm);
 113 
 114 };
 115 
 116 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 117                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 118   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 119   int ymm_bytes = num_xmm_regs * 16;
 120   int zmm_bytes = num_xmm_regs * 32;
 121 #ifdef COMPILER2
 122   if (save_vectors) {
 123     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 124     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 125     // Save upper half of YMM registers
 126     int vect_bytes = ymm_bytes;
 127     if (UseAVX > 2) {
 128       // Save upper half of ZMM registers as well
 129       vect_bytes += zmm_bytes;
 130     }
 131     additional_frame_words += vect_bytes / wordSize;
 132   }
 133 #else
 134   assert(!save_vectors, "vectors are generated only by C2");
 135 #endif
 136   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 137   int frame_words = frame_size_in_bytes / wordSize;
 138   *total_frame_words = frame_words;
 139 
 140   assert(FPUStateSizeInWords == 27, "update stack layout");
 141 
 142   // save registers, fpu state, and flags
 143   // We assume caller has already has return address slot on the stack
 144   // We push epb twice in this sequence because we want the real rbp,
 145   // to be under the return like a normal enter and we want to use pusha
 146   // We push by hand instead of pusing push
 147   __ enter();
 148   __ pusha();
 149   __ pushf();
 150   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 151   __ push_FPU_state();          // Save FPU state & init
 152 
 153   if (verify_fpu) {
 154     // Some stubs may have non standard FPU control word settings so
 155     // only check and reset the value when it required to be the
 156     // standard value.  The safepoint blob in particular can be used
 157     // in methods which are using the 24 bit control word for
 158     // optimized float math.
 159 
 160 #ifdef ASSERT
 161     // Make sure the control word has the expected value
 162     Label ok;
 163     __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 164     __ jccb(Assembler::equal, ok);
 165     __ stop("corrupted control word detected");
 166     __ bind(ok);
 167 #endif
 168 
 169     // Reset the control word to guard against exceptions being unmasked
 170     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 171     // into the on stack copy and then reload that to make sure that the
 172     // current and future values are correct.
 173     __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 174   }
 175 
 176   __ frstor(Address(rsp, 0));
 177   if (!verify_fpu) {
 178     // Set the control word so that exceptions are masked for the
 179     // following code.
 180     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
 181   }
 182 
 183   int off = st0_off;
 184   int delta = st1_off - off;
 185 
 186   // Save the FPU registers in de-opt-able form
 187   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 188     __ fstp_d(Address(rsp, off*wordSize));
 189     off += delta;
 190   }
 191 
 192   off = xmm0_off;
 193   delta = xmm1_off - off;
 194   if(UseSSE == 1) {
 195     // Save the XMM state
 196     for (int n = 0; n < num_xmm_regs; n++) {
 197       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 198       off += delta;
 199     }
 200   } else if(UseSSE >= 2) {
 201     // Save whole 128bit (16 bytes) XMM registers
 202     for (int n = 0; n < num_xmm_regs; n++) {
 203       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 204       off += delta;
 205     }
 206   }
 207 
 208   if (save_vectors) {
 209     __ subptr(rsp, ymm_bytes);
 210     // Save upper half of YMM registers
 211     for (int n = 0; n < num_xmm_regs; n++) {
 212       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 213     }
 214     if (UseAVX > 2) {
 215       __ subptr(rsp, zmm_bytes);
 216       // Save upper half of ZMM registers
 217       for (int n = 0; n < num_xmm_regs; n++) {
 218         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 219       }
 220     }
 221   }
 222 
 223   // Set an oopmap for the call site.  This oopmap will map all
 224   // oop-registers and debug-info registers as callee-saved.  This
 225   // will allow deoptimization at this safepoint to find all possible
 226   // debug-info recordings, as well as let GC find all oops.
 227 
 228   OopMapSet *oop_maps = new OopMapSet();
 229   OopMap* map =  new OopMap( frame_words, 0 );
 230 
 231 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 232 #define NEXTREG(x) (x)->as_VMReg()->next()
 233 
 234   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 235   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 236   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 238   // rbp, location is known implicitly, no oopMap
 239   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 240   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 241   // %%% This is really a waste but we'll keep things as they were for now for the upper component
 242   off = st0_off;
 243   delta = st1_off - off;
 244   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 245     FloatRegister freg_name = as_FloatRegister(n);
 246     map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
 247     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
 248     off += delta;
 249   }
 250   off = xmm0_off;
 251   delta = xmm1_off - off;
 252   for (int n = 0; n < num_xmm_regs; n++) {
 253     XMMRegister xmm_name = as_XMMRegister(n);
 254     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 255     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 256     off += delta;
 257   }
 258 #undef NEXTREG
 259 #undef STACK_OFFSET
 260 
 261   return map;
 262 }
 263 
 264 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 265   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 266   int ymm_bytes = num_xmm_regs * 16;
 267   int zmm_bytes = num_xmm_regs * 32;
 268   // Recover XMM & FPU state
 269   int additional_frame_bytes = 0;
 270 #ifdef COMPILER2
 271   if (restore_vectors) {
 272     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 273     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 274     // Save upper half of YMM registers
 275     additional_frame_bytes = ymm_bytes;
 276     if (UseAVX > 2) {
 277       // Save upper half of ZMM registers as well
 278       additional_frame_bytes += zmm_bytes;
 279     }
 280   }
 281 #else
 282   assert(!restore_vectors, "vectors are generated only by C2");
 283 #endif
 284 
 285   int off = xmm0_off;
 286   int delta = xmm1_off - off;
 287 
 288   if (UseSSE == 1) {
 289     // Restore XMM registers
 290     assert(additional_frame_bytes == 0, "");
 291     for (int n = 0; n < num_xmm_regs; n++) {
 292       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 293       off += delta;
 294     }
 295   } else if (UseSSE >= 2) {
 296     // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
 297     // ZMM because the movdqu instruction zeros the upper part of the XMM register.
 298     for (int n = 0; n < num_xmm_regs; n++) {
 299       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 300       off += delta;
 301     }
 302   }
 303 
 304   if (restore_vectors) {
 305     if (UseAVX > 2) {
 306       // Restore upper half of ZMM registers.
 307       for (int n = 0; n < num_xmm_regs; n++) {
 308         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
 309       }
 310       __ addptr(rsp, zmm_bytes);
 311     }
 312     // Restore upper half of YMM registers.
 313     for (int n = 0; n < num_xmm_regs; n++) {
 314       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
 315     }
 316     __ addptr(rsp, ymm_bytes);
 317   }
 318 
 319   __ pop_FPU_state();
 320   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 321 
 322   __ popf();
 323   __ popa();
 324   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 325   __ pop(rbp);
 326 }
 327 
 328 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 329 
 330   // Just restore result register. Only used by deoptimization. By
 331   // now any callee save register that needs to be restore to a c2
 332   // caller of the deoptee has been extracted into the vframeArray
 333   // and will be stuffed into the c2i adapter we create for later
 334   // restoration so only result registers need to be restored here.
 335   //
 336 
 337   __ frstor(Address(rsp, 0));      // Restore fpu state
 338 
 339   // Recover XMM & FPU state
 340   if( UseSSE == 1 ) {
 341     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 342   } else if( UseSSE >= 2 ) {
 343     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 344   }
 345   __ movptr(rax, Address(rsp, rax_off*wordSize));
 346   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
 347   // Pop all of the register save are off the stack except the return address
 348   __ addptr(rsp, return_off * wordSize);
 349 }
 350 
 351 // Is vector's size (in bytes) bigger than a size saved by default?
 352 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
 353 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
 354 bool SharedRuntime::is_wide_vector(int size) {
 355   return size > 16;
 356 }
 357 
 358 size_t SharedRuntime::trampoline_size() {
 359   return 16;
 360 }
 361 
 362 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
 363   __ jump(RuntimeAddress(destination));
 364 }
 365 
 366 // The java_calling_convention describes stack locations as ideal slots on
 367 // a frame with no abi restrictions. Since we must observe abi restrictions
 368 // (like the placement of the register window) the slots must be biased by
 369 // the following value.
 370 static int reg2offset_in(VMReg r) {
 371   // Account for saved rbp, and return address
 372   // This should really be in_preserve_stack_slots
 373   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 374 }
 375 
 376 static int reg2offset_out(VMReg r) {
 377   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 378 }
 379 
 380 // ---------------------------------------------------------------------------
 381 // Read the array of BasicTypes from a signature, and compute where the
 382 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 383 // quantities.  Values less than SharedInfo::stack0 are registers, those above
 384 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 385 // as framesizes are fixed.
 386 // VMRegImpl::stack0 refers to the first slot 0(sp).
 387 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.  Register
 388 // up to RegisterImpl::number_of_registers) are the 32-bit
 389 // integer registers.
 390 
 391 // Pass first two oop/int args in registers ECX and EDX.
 392 // Pass first two float/double args in registers XMM0 and XMM1.
 393 // Doubles have precedence, so if you pass a mix of floats and doubles
 394 // the doubles will grab the registers before the floats will.
 395 
 396 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 397 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 398 // units regardless of build. Of course for i486 there is no 64 bit build
 399 
 400 
 401 // ---------------------------------------------------------------------------
 402 // The compiled Java calling convention.
 403 // Pass first two oop/int args in registers ECX and EDX.
 404 // Pass first two float/double args in registers XMM0 and XMM1.
 405 // Doubles have precedence, so if you pass a mix of floats and doubles
 406 // the doubles will grab the registers before the floats will.
 407 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 408                                            VMRegPair *regs,
 409                                            int total_args_passed,
 410                                            int is_outgoing) {
 411   uint    stack = 0;          // Starting stack position for args on stack
 412 
 413 
 414   // Pass first two oop/int args in registers ECX and EDX.
 415   uint reg_arg0 = 9999;
 416   uint reg_arg1 = 9999;
 417 
 418   // Pass first two float/double args in registers XMM0 and XMM1.
 419   // Doubles have precedence, so if you pass a mix of floats and doubles
 420   // the doubles will grab the registers before the floats will.
 421   // CNC - TURNED OFF FOR non-SSE.
 422   //       On Intel we have to round all doubles (and most floats) at
 423   //       call sites by storing to the stack in any case.
 424   // UseSSE=0 ==> Don't Use ==> 9999+0
 425   // UseSSE=1 ==> Floats only ==> 9999+1
 426   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 427   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 428   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 429   uint freg_arg0 = 9999+fargs;
 430   uint freg_arg1 = 9999+fargs;
 431 
 432   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 433   int i;
 434   for( i = 0; i < total_args_passed; i++) {
 435     if( sig_bt[i] == T_DOUBLE ) {
 436       // first 2 doubles go in registers
 437       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 438       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 439       else // Else double is passed low on the stack to be aligned.
 440         stack += 2;
 441     } else if( sig_bt[i] == T_LONG ) {
 442       stack += 2;
 443     }
 444   }
 445   int dstack = 0;             // Separate counter for placing doubles
 446 
 447   // Now pick where all else goes.
 448   for( i = 0; i < total_args_passed; i++) {
 449     // From the type and the argument number (count) compute the location
 450     switch( sig_bt[i] ) {
 451     case T_SHORT:
 452     case T_CHAR:
 453     case T_BYTE:
 454     case T_BOOLEAN:
 455     case T_INT:
 456     case T_ARRAY:
 457     case T_OBJECT:
 458     case T_ADDRESS:
 459       if( reg_arg0 == 9999 )  {
 460         reg_arg0 = i;
 461         regs[i].set1(rcx->as_VMReg());
 462       } else if( reg_arg1 == 9999 )  {
 463         reg_arg1 = i;
 464         regs[i].set1(rdx->as_VMReg());
 465       } else {
 466         regs[i].set1(VMRegImpl::stack2reg(stack++));
 467       }
 468       break;
 469     case T_FLOAT:
 470       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 471         freg_arg0 = i;
 472         regs[i].set1(xmm0->as_VMReg());
 473       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 474         freg_arg1 = i;
 475         regs[i].set1(xmm1->as_VMReg());
 476       } else {
 477         regs[i].set1(VMRegImpl::stack2reg(stack++));
 478       }
 479       break;
 480     case T_LONG:
 481       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 482       regs[i].set2(VMRegImpl::stack2reg(dstack));
 483       dstack += 2;
 484       break;
 485     case T_DOUBLE:
 486       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 487       if( freg_arg0 == (uint)i ) {
 488         regs[i].set2(xmm0->as_VMReg());
 489       } else if( freg_arg1 == (uint)i ) {
 490         regs[i].set2(xmm1->as_VMReg());
 491       } else {
 492         regs[i].set2(VMRegImpl::stack2reg(dstack));
 493         dstack += 2;
 494       }
 495       break;
 496     case T_VOID: regs[i].set_bad(); break;
 497       break;
 498     default:
 499       ShouldNotReachHere();
 500       break;
 501     }
 502   }
 503 
 504   // return value can be odd number of VMRegImpl stack slots make multiple of 2
 505   return round_to(stack, 2);
 506 }
 507 
 508 // Patch the callers callsite with entry to compiled code if it exists.
 509 static void patch_callers_callsite(MacroAssembler *masm) {
 510   Label L;
 511   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 512   __ jcc(Assembler::equal, L);
 513   // Schedule the branch target address early.
 514   // Call into the VM to patch the caller, then jump to compiled callee
 515   // rax, isn't live so capture return address while we easily can
 516   __ movptr(rax, Address(rsp, 0));
 517   __ pusha();
 518   __ pushf();
 519 
 520   if (UseSSE == 1) {
 521     __ subptr(rsp, 2*wordSize);
 522     __ movflt(Address(rsp, 0), xmm0);
 523     __ movflt(Address(rsp, wordSize), xmm1);
 524   }
 525   if (UseSSE >= 2) {
 526     __ subptr(rsp, 4*wordSize);
 527     __ movdbl(Address(rsp, 0), xmm0);
 528     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 529   }
 530 #ifdef COMPILER2
 531   // C2 may leave the stack dirty if not in SSE2+ mode
 532   if (UseSSE >= 2) {
 533     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 534   } else {
 535     __ empty_FPU_stack();
 536   }
 537 #endif /* COMPILER2 */
 538 
 539   // VM needs caller's callsite
 540   __ push(rax);
 541   // VM needs target method
 542   __ push(rbx);
 543   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 544   __ addptr(rsp, 2*wordSize);
 545 
 546   if (UseSSE == 1) {
 547     __ movflt(xmm0, Address(rsp, 0));
 548     __ movflt(xmm1, Address(rsp, wordSize));
 549     __ addptr(rsp, 2*wordSize);
 550   }
 551   if (UseSSE >= 2) {
 552     __ movdbl(xmm0, Address(rsp, 0));
 553     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 554     __ addptr(rsp, 4*wordSize);
 555   }
 556 
 557   __ popf();
 558   __ popa();
 559   __ bind(L);
 560 }
 561 
 562 
 563 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 564   int next_off = st_off - Interpreter::stackElementSize;
 565   __ movdbl(Address(rsp, next_off), r);
 566 }
 567 
 568 static void gen_c2i_adapter(MacroAssembler *masm,
 569                             int total_args_passed,
 570                             int comp_args_on_stack,
 571                             const BasicType *sig_bt,
 572                             const VMRegPair *regs,
 573                             Label& skip_fixup) {
 574   // Before we get into the guts of the C2I adapter, see if we should be here
 575   // at all.  We've come from compiled code and are attempting to jump to the
 576   // interpreter, which means the caller made a static call to get here
 577   // (vcalls always get a compiled target if there is one).  Check for a
 578   // compiled target.  If there is one, we need to patch the caller's call.
 579   patch_callers_callsite(masm);
 580 
 581   __ bind(skip_fixup);
 582 
 583 #ifdef COMPILER2
 584   // C2 may leave the stack dirty if not in SSE2+ mode
 585   if (UseSSE >= 2) {
 586     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 587   } else {
 588     __ empty_FPU_stack();
 589   }
 590 #endif /* COMPILER2 */
 591 
 592   // Since all args are passed on the stack, total_args_passed * interpreter_
 593   // stack_element_size  is the
 594   // space we need.
 595   int extraspace = total_args_passed * Interpreter::stackElementSize;
 596 
 597   // Get return address
 598   __ pop(rax);
 599 
 600   // set senderSP value
 601   __ movptr(rsi, rsp);
 602 
 603   __ subptr(rsp, extraspace);
 604 
 605   // Now write the args into the outgoing interpreter space
 606   for (int i = 0; i < total_args_passed; i++) {
 607     if (sig_bt[i] == T_VOID) {
 608       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 609       continue;
 610     }
 611 
 612     // st_off points to lowest address on stack.
 613     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
 614     int next_off = st_off - Interpreter::stackElementSize;
 615 
 616     // Say 4 args:
 617     // i   st_off
 618     // 0   12 T_LONG
 619     // 1    8 T_VOID
 620     // 2    4 T_OBJECT
 621     // 3    0 T_BOOL
 622     VMReg r_1 = regs[i].first();
 623     VMReg r_2 = regs[i].second();
 624     if (!r_1->is_valid()) {
 625       assert(!r_2->is_valid(), "");
 626       continue;
 627     }
 628 
 629     if (r_1->is_stack()) {
 630       // memory to memory use fpu stack top
 631       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 632 
 633       if (!r_2->is_valid()) {
 634         __ movl(rdi, Address(rsp, ld_off));
 635         __ movptr(Address(rsp, st_off), rdi);
 636       } else {
 637 
 638         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 639         // st_off == MSW, st_off-wordSize == LSW
 640 
 641         __ movptr(rdi, Address(rsp, ld_off));
 642         __ movptr(Address(rsp, next_off), rdi);
 643 #ifndef _LP64
 644         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 645         __ movptr(Address(rsp, st_off), rdi);
 646 #else
 647 #ifdef ASSERT
 648         // Overwrite the unused slot with known junk
 649         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 650         __ movptr(Address(rsp, st_off), rax);
 651 #endif /* ASSERT */
 652 #endif // _LP64
 653       }
 654     } else if (r_1->is_Register()) {
 655       Register r = r_1->as_Register();
 656       if (!r_2->is_valid()) {
 657         __ movl(Address(rsp, st_off), r);
 658       } else {
 659         // long/double in gpr
 660         NOT_LP64(ShouldNotReachHere());
 661         // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 662         // T_DOUBLE and T_LONG use two slots in the interpreter
 663         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 664           // long/double in gpr
 665 #ifdef ASSERT
 666           // Overwrite the unused slot with known junk
 667           LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
 668           __ movptr(Address(rsp, st_off), rax);
 669 #endif /* ASSERT */
 670           __ movptr(Address(rsp, next_off), r);
 671         } else {
 672           __ movptr(Address(rsp, st_off), r);
 673         }
 674       }
 675     } else {
 676       assert(r_1->is_XMMRegister(), "");
 677       if (!r_2->is_valid()) {
 678         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 679       } else {
 680         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
 681         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 682       }
 683     }
 684   }
 685 
 686   // Schedule the branch target address early.
 687   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 688   // And repush original return address
 689   __ push(rax);
 690   __ jmp(rcx);
 691 }
 692 
 693 
 694 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 695   int next_val_off = ld_off - Interpreter::stackElementSize;
 696   __ movdbl(r, Address(saved_sp, next_val_off));
 697 }
 698 
 699 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 700                         address code_start, address code_end,
 701                         Label& L_ok) {
 702   Label L_fail;
 703   __ lea(temp_reg, ExternalAddress(code_start));
 704   __ cmpptr(pc_reg, temp_reg);
 705   __ jcc(Assembler::belowEqual, L_fail);
 706   __ lea(temp_reg, ExternalAddress(code_end));
 707   __ cmpptr(pc_reg, temp_reg);
 708   __ jcc(Assembler::below, L_ok);
 709   __ bind(L_fail);
 710 }
 711 
 712 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 713                                     int total_args_passed,
 714                                     int comp_args_on_stack,
 715                                     const BasicType *sig_bt,
 716                                     const VMRegPair *regs) {
 717   // Note: rsi contains the senderSP on entry. We must preserve it since
 718   // we may do a i2c -> c2i transition if we lose a race where compiled
 719   // code goes non-entrant while we get args ready.
 720 
 721   // Adapters can be frameless because they do not require the caller
 722   // to perform additional cleanup work, such as correcting the stack pointer.
 723   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 724   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 725   // even if a callee has modified the stack pointer.
 726   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 727   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 728   // up via the senderSP register).
 729   // In other words, if *either* the caller or callee is interpreted, we can
 730   // get the stack pointer repaired after a call.
 731   // This is why c2i and i2c adapters cannot be indefinitely composed.
 732   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 733   // both caller and callee would be compiled methods, and neither would
 734   // clean up the stack pointer changes performed by the two adapters.
 735   // If this happens, control eventually transfers back to the compiled
 736   // caller, but with an uncorrected stack, causing delayed havoc.
 737 
 738   // Pick up the return address
 739   __ movptr(rax, Address(rsp, 0));
 740 
 741   if (VerifyAdapterCalls &&
 742       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 743     // So, let's test for cascading c2i/i2c adapters right now.
 744     //  assert(Interpreter::contains($return_addr) ||
 745     //         StubRoutines::contains($return_addr),
 746     //         "i2c adapter must return to an interpreter frame");
 747     __ block_comment("verify_i2c { ");
 748     Label L_ok;
 749     if (Interpreter::code() != NULL)
 750       range_check(masm, rax, rdi,
 751                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 752                   L_ok);
 753     if (StubRoutines::code1() != NULL)
 754       range_check(masm, rax, rdi,
 755                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 756                   L_ok);
 757     if (StubRoutines::code2() != NULL)
 758       range_check(masm, rax, rdi,
 759                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 760                   L_ok);
 761     const char* msg = "i2c adapter must return to an interpreter frame";
 762     __ block_comment(msg);
 763     __ stop(msg);
 764     __ bind(L_ok);
 765     __ block_comment("} verify_i2ce ");
 766   }
 767 
 768   // Must preserve original SP for loading incoming arguments because
 769   // we need to align the outgoing SP for compiled code.
 770   __ movptr(rdi, rsp);
 771 
 772   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 773   // in registers, we will occasionally have no stack args.
 774   int comp_words_on_stack = 0;
 775   if (comp_args_on_stack) {
 776     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 777     // registers are below.  By subtracting stack0, we either get a negative
 778     // number (all values in registers) or the maximum stack slot accessed.
 779     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 780     // Convert 4-byte stack slots to words.
 781     comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
 782     // Round up to miminum stack alignment, in wordSize
 783     comp_words_on_stack = round_to(comp_words_on_stack, 2);
 784     __ subptr(rsp, comp_words_on_stack * wordSize);
 785   }
 786 
 787   // Align the outgoing SP
 788   __ andptr(rsp, -(StackAlignmentInBytes));
 789 
 790   // push the return address on the stack (note that pushing, rather
 791   // than storing it, yields the correct frame alignment for the callee)
 792   __ push(rax);
 793 
 794   // Put saved SP in another register
 795   const Register saved_sp = rax;
 796   __ movptr(saved_sp, rdi);
 797 
 798 
 799   // Will jump to the compiled code just as if compiled code was doing it.
 800   // Pre-load the register-jump target early, to schedule it better.
 801   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 802 
 803   // Now generate the shuffle code.  Pick up all register args and move the
 804   // rest through the floating point stack top.
 805   for (int i = 0; i < total_args_passed; i++) {
 806     if (sig_bt[i] == T_VOID) {
 807       // Longs and doubles are passed in native word order, but misaligned
 808       // in the 32-bit build.
 809       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 810       continue;
 811     }
 812 
 813     // Pick up 0, 1 or 2 words from SP+offset.
 814 
 815     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 816             "scrambled load targets?");
 817     // Load in argument order going down.
 818     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
 819     // Point to interpreter value (vs. tag)
 820     int next_off = ld_off - Interpreter::stackElementSize;
 821     //
 822     //
 823     //
 824     VMReg r_1 = regs[i].first();
 825     VMReg r_2 = regs[i].second();
 826     if (!r_1->is_valid()) {
 827       assert(!r_2->is_valid(), "");
 828       continue;
 829     }
 830     if (r_1->is_stack()) {
 831       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 832       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 833 
 834       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
 835       // and if we end up going thru a c2i because of a miss a reasonable value of rsi
 836       // we be generated.
 837       if (!r_2->is_valid()) {
 838         // __ fld_s(Address(saved_sp, ld_off));
 839         // __ fstp_s(Address(rsp, st_off));
 840         __ movl(rsi, Address(saved_sp, ld_off));
 841         __ movptr(Address(rsp, st_off), rsi);
 842       } else {
 843         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 844         // are accessed as negative so LSW is at LOW address
 845 
 846         // ld_off is MSW so get LSW
 847         // st_off is LSW (i.e. reg.first())
 848         // __ fld_d(Address(saved_sp, next_off));
 849         // __ fstp_d(Address(rsp, st_off));
 850         //
 851         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 852         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 853         // So we must adjust where to pick up the data to match the interpreter.
 854         //
 855         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 856         // are accessed as negative so LSW is at LOW address
 857 
 858         // ld_off is MSW so get LSW
 859         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 860                            next_off : ld_off;
 861         __ movptr(rsi, Address(saved_sp, offset));
 862         __ movptr(Address(rsp, st_off), rsi);
 863 #ifndef _LP64
 864         __ movptr(rsi, Address(saved_sp, ld_off));
 865         __ movptr(Address(rsp, st_off + wordSize), rsi);
 866 #endif // _LP64
 867       }
 868     } else if (r_1->is_Register()) {  // Register argument
 869       Register r = r_1->as_Register();
 870       assert(r != rax, "must be different");
 871       if (r_2->is_valid()) {
 872         //
 873         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 874         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 875         // So we must adjust where to pick up the data to match the interpreter.
 876 
 877         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 878                            next_off : ld_off;
 879 
 880         // this can be a misaligned move
 881         __ movptr(r, Address(saved_sp, offset));
 882 #ifndef _LP64
 883         assert(r_2->as_Register() != rax, "need another temporary register");
 884         // Remember r_1 is low address (and LSB on x86)
 885         // So r_2 gets loaded from high address regardless of the platform
 886         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 887 #endif // _LP64
 888       } else {
 889         __ movl(r, Address(saved_sp, ld_off));
 890       }
 891     } else {
 892       assert(r_1->is_XMMRegister(), "");
 893       if (!r_2->is_valid()) {
 894         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 895       } else {
 896         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 897       }
 898     }
 899   }
 900 
 901   // 6243940 We might end up in handle_wrong_method if
 902   // the callee is deoptimized as we race thru here. If that
 903   // happens we don't want to take a safepoint because the
 904   // caller frame will look interpreted and arguments are now
 905   // "compiled" so it is much better to make this transition
 906   // invisible to the stack walking code. Unfortunately if
 907   // we try and find the callee by normal means a safepoint
 908   // is possible. So we stash the desired callee in the thread
 909   // and the vm will find there should this case occur.
 910 
 911   __ get_thread(rax);
 912   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 913 
 914   // move Method* to rax, in case we end up in an c2i adapter.
 915   // the c2i adapters expect Method* in rax, (c2) because c2's
 916   // resolve stubs return the result (the method) in rax,.
 917   // I'd love to fix this.
 918   __ mov(rax, rbx);
 919 
 920   __ jmp(rdi);
 921 }
 922 
 923 // ---------------------------------------------------------------
 924 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 925                                                             int total_args_passed,
 926                                                             int comp_args_on_stack,
 927                                                             const BasicType *sig_bt,
 928                                                             const VMRegPair *regs,
 929                                                             AdapterFingerPrint* fingerprint) {
 930   address i2c_entry = __ pc();
 931 
 932   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 933 
 934   // -------------------------------------------------------------------------
 935   // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
 936   // to the interpreter.  The args start out packed in the compiled layout.  They
 937   // need to be unpacked into the interpreter layout.  This will almost always
 938   // require some stack space.  We grow the current (compiled) stack, then repack
 939   // the args.  We  finally end in a jump to the generic interpreter entry point.
 940   // On exit from the interpreter, the interpreter will restore our SP (lest the
 941   // compiled code, which relys solely on SP and not EBP, get sick).
 942 
 943   address c2i_unverified_entry = __ pc();
 944   Label skip_fixup;
 945 
 946   Register holder = rax;
 947   Register receiver = rcx;
 948   Register temp = rbx;
 949 
 950   {
 951 
 952     Label missed;
 953     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 954     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 955     __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
 956     __ jcc(Assembler::notEqual, missed);
 957     // Method might have been compiled since the call site was patched to
 958     // interpreted if that is the case treat it as a miss so we can get
 959     // the call site corrected.
 960     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 961     __ jcc(Assembler::equal, skip_fixup);
 962 
 963     __ bind(missed);
 964     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 965   }
 966 
 967   address c2i_entry = __ pc();
 968 
 969   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 970 
 971   __ flush();
 972   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 973 }
 974 
 975 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 976                                          VMRegPair *regs,
 977                                          VMRegPair *regs2,
 978                                          int total_args_passed) {
 979   assert(regs2 == NULL, "not needed on x86");
 980 // We return the amount of VMRegImpl stack slots we need to reserve for all
 981 // the arguments NOT counting out_preserve_stack_slots.
 982 
 983   uint    stack = 0;        // All arguments on stack
 984 
 985   for( int i = 0; i < total_args_passed; i++) {
 986     // From the type and the argument number (count) compute the location
 987     switch( sig_bt[i] ) {
 988     case T_BOOLEAN:
 989     case T_CHAR:
 990     case T_FLOAT:
 991     case T_BYTE:
 992     case T_SHORT:
 993     case T_INT:
 994     case T_OBJECT:
 995     case T_ARRAY:
 996     case T_ADDRESS:
 997     case T_METADATA:
 998       regs[i].set1(VMRegImpl::stack2reg(stack++));
 999       break;
1000     case T_LONG:
1001     case T_DOUBLE: // The stack numbering is reversed from Java
1002       // Since C arguments do not get reversed, the ordering for
1003       // doubles on the stack must be opposite the Java convention
1004       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
1005       regs[i].set2(VMRegImpl::stack2reg(stack));
1006       stack += 2;
1007       break;
1008     case T_VOID: regs[i].set_bad(); break;
1009     default:
1010       ShouldNotReachHere();
1011       break;
1012     }
1013   }
1014   return stack;
1015 }
1016 
1017 // A simple move of integer like type
1018 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1019   if (src.first()->is_stack()) {
1020     if (dst.first()->is_stack()) {
1021       // stack to stack
1022       // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1023       // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1024       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1025       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1026     } else {
1027       // stack to reg
1028       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1029     }
1030   } else if (dst.first()->is_stack()) {
1031     // reg to stack
1032     // no need to sign extend on 64bit
1033     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1034   } else {
1035     if (dst.first() != src.first()) {
1036       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1037     }
1038   }
1039 }
1040 
1041 // An oop arg. Must pass a handle not the oop itself
1042 static void object_move(MacroAssembler* masm,
1043                         OopMap* map,
1044                         int oop_handle_offset,
1045                         int framesize_in_slots,
1046                         VMRegPair src,
1047                         VMRegPair dst,
1048                         bool is_receiver,
1049                         int* receiver_offset) {
1050 
1051   // Because of the calling conventions we know that src can be a
1052   // register or a stack location. dst can only be a stack location.
1053 
1054   assert(dst.first()->is_stack(), "must be stack");
1055   // must pass a handle. First figure out the location we use as a handle
1056 
1057   if (src.first()->is_stack()) {
1058     // Oop is already on the stack as an argument
1059     Register rHandle = rax;
1060     Label nil;
1061     __ xorptr(rHandle, rHandle);
1062     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1063     __ jcc(Assembler::equal, nil);
1064     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1065     __ bind(nil);
1066     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1067 
1068     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1069     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1070     if (is_receiver) {
1071       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1072     }
1073   } else {
1074     // Oop is in an a register we must store it to the space we reserve
1075     // on the stack for oop_handles
1076     const Register rOop = src.first()->as_Register();
1077     const Register rHandle = rax;
1078     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1079     int offset = oop_slot*VMRegImpl::stack_slot_size;
1080     Label skip;
1081     __ movptr(Address(rsp, offset), rOop);
1082     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1083     __ xorptr(rHandle, rHandle);
1084     __ cmpptr(rOop, (int32_t)NULL_WORD);
1085     __ jcc(Assembler::equal, skip);
1086     __ lea(rHandle, Address(rsp, offset));
1087     __ bind(skip);
1088     // Store the handle parameter
1089     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1090     if (is_receiver) {
1091       *receiver_offset = offset;
1092     }
1093   }
1094 }
1095 
1096 // A float arg may have to do float reg int reg conversion
1097 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1098   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1099 
1100   // Because of the calling convention we know that src is either a stack location
1101   // or an xmm register. dst can only be a stack location.
1102 
1103   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1104 
1105   if (src.first()->is_stack()) {
1106     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1107     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1108   } else {
1109     // reg to stack
1110     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1111   }
1112 }
1113 
1114 // A long move
1115 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1116 
1117   // The only legal possibility for a long_move VMRegPair is:
1118   // 1: two stack slots (possibly unaligned)
1119   // as neither the java  or C calling convention will use registers
1120   // for longs.
1121 
1122   if (src.first()->is_stack() && dst.first()->is_stack()) {
1123     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1124     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1125     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1126     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1127     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1128   } else {
1129     ShouldNotReachHere();
1130   }
1131 }
1132 
1133 // A double move
1134 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1135 
1136   // The only legal possibilities for a double_move VMRegPair are:
1137   // The painful thing here is that like long_move a VMRegPair might be
1138 
1139   // Because of the calling convention we know that src is either
1140   //   1: a single physical register (xmm registers only)
1141   //   2: two stack slots (possibly unaligned)
1142   // dst can only be a pair of stack slots.
1143 
1144   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1145 
1146   if (src.first()->is_stack()) {
1147     // source is all stack
1148     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1149     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1150     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1151     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1152   } else {
1153     // reg to stack
1154     // No worries about stack alignment
1155     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1156   }
1157 }
1158 
1159 
1160 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1161   // We always ignore the frame_slots arg and just use the space just below frame pointer
1162   // which by this time is free to use
1163   switch (ret_type) {
1164   case T_FLOAT:
1165     __ fstp_s(Address(rbp, -wordSize));
1166     break;
1167   case T_DOUBLE:
1168     __ fstp_d(Address(rbp, -2*wordSize));
1169     break;
1170   case T_VOID:  break;
1171   case T_LONG:
1172     __ movptr(Address(rbp, -wordSize), rax);
1173     NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
1174     break;
1175   default: {
1176     __ movptr(Address(rbp, -wordSize), rax);
1177     }
1178   }
1179 }
1180 
1181 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1182   // We always ignore the frame_slots arg and just use the space just below frame pointer
1183   // which by this time is free to use
1184   switch (ret_type) {
1185   case T_FLOAT:
1186     __ fld_s(Address(rbp, -wordSize));
1187     break;
1188   case T_DOUBLE:
1189     __ fld_d(Address(rbp, -2*wordSize));
1190     break;
1191   case T_LONG:
1192     __ movptr(rax, Address(rbp, -wordSize));
1193     NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
1194     break;
1195   case T_VOID:  break;
1196   default: {
1197     __ movptr(rax, Address(rbp, -wordSize));
1198     }
1199   }
1200 }
1201 
1202 
1203 static void save_or_restore_arguments(MacroAssembler* masm,
1204                                       const int stack_slots,
1205                                       const int total_in_args,
1206                                       const int arg_save_area,
1207                                       OopMap* map,
1208                                       VMRegPair* in_regs,
1209                                       BasicType* in_sig_bt) {
1210   // if map is non-NULL then the code should store the values,
1211   // otherwise it should load them.
1212   int handle_index = 0;
1213   // Save down double word first
1214   for ( int i = 0; i < total_in_args; i++) {
1215     if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1216       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1217       int offset = slot * VMRegImpl::stack_slot_size;
1218       handle_index += 2;
1219       assert(handle_index <= stack_slots, "overflow");
1220       if (map != NULL) {
1221         __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1222       } else {
1223         __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1224       }
1225     }
1226     if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
1227       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1228       int offset = slot * VMRegImpl::stack_slot_size;
1229       handle_index += 2;
1230       assert(handle_index <= stack_slots, "overflow");
1231       if (map != NULL) {
1232         __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
1233         if (in_regs[i].second()->is_Register()) {
1234           __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
1235         }
1236       } else {
1237         __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
1238         if (in_regs[i].second()->is_Register()) {
1239           __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
1240         }
1241       }
1242     }
1243   }
1244   // Save or restore single word registers
1245   for ( int i = 0; i < total_in_args; i++) {
1246     if (in_regs[i].first()->is_Register()) {
1247       int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1248       int offset = slot * VMRegImpl::stack_slot_size;
1249       assert(handle_index <= stack_slots, "overflow");
1250       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1251         map->set_oop(VMRegImpl::stack2reg(slot));;
1252       }
1253 
1254       // Value is in an input register pass we must flush it to the stack
1255       const Register reg = in_regs[i].first()->as_Register();
1256       switch (in_sig_bt[i]) {
1257         case T_ARRAY:
1258           if (map != NULL) {
1259             __ movptr(Address(rsp, offset), reg);
1260           } else {
1261             __ movptr(reg, Address(rsp, offset));
1262           }
1263           break;
1264         case T_BOOLEAN:
1265         case T_CHAR:
1266         case T_BYTE:
1267         case T_SHORT:
1268         case T_INT:
1269           if (map != NULL) {
1270             __ movl(Address(rsp, offset), reg);
1271           } else {
1272             __ movl(reg, Address(rsp, offset));
1273           }
1274           break;
1275         case T_OBJECT:
1276         default: ShouldNotReachHere();
1277       }
1278     } else if (in_regs[i].first()->is_XMMRegister()) {
1279       if (in_sig_bt[i] == T_FLOAT) {
1280         int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1281         int offset = slot * VMRegImpl::stack_slot_size;
1282         assert(handle_index <= stack_slots, "overflow");
1283         if (map != NULL) {
1284           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1285         } else {
1286           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1287         }
1288       }
1289     } else if (in_regs[i].first()->is_stack()) {
1290       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1291         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1292         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1293       }
1294     }
1295   }
1296 }
1297 
1298 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1299 // keeps a new JNI critical region from starting until a GC has been
1300 // forced.  Save down any oops in registers and describe them in an
1301 // OopMap.
1302 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1303                                                Register thread,
1304                                                int stack_slots,
1305                                                int total_c_args,
1306                                                int total_in_args,
1307                                                int arg_save_area,
1308                                                OopMapSet* oop_maps,
1309                                                VMRegPair* in_regs,
1310                                                BasicType* in_sig_bt) {
1311   __ block_comment("check GCLocker::needs_gc");
1312   Label cont;
1313   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1314   __ jcc(Assembler::equal, cont);
1315 
1316   // Save down any incoming oops and call into the runtime to halt for a GC
1317 
1318   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1319 
1320   save_or_restore_arguments(masm, stack_slots, total_in_args,
1321                             arg_save_area, map, in_regs, in_sig_bt);
1322 
1323   address the_pc = __ pc();
1324   oop_maps->add_gc_map( __ offset(), map);
1325   __ set_last_Java_frame(thread, rsp, noreg, the_pc);
1326 
1327   __ block_comment("block_for_jni_critical");
1328   __ push(thread);
1329   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1330   __ increment(rsp, wordSize);
1331 
1332   __ get_thread(thread);
1333   __ reset_last_Java_frame(thread, false);
1334 
1335   save_or_restore_arguments(masm, stack_slots, total_in_args,
1336                             arg_save_area, NULL, in_regs, in_sig_bt);
1337 
1338   __ bind(cont);
1339 #ifdef ASSERT
1340   if (StressCriticalJNINatives) {
1341     // Stress register saving
1342     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1343     save_or_restore_arguments(masm, stack_slots, total_in_args,
1344                               arg_save_area, map, in_regs, in_sig_bt);
1345     // Destroy argument registers
1346     for (int i = 0; i < total_in_args - 1; i++) {
1347       if (in_regs[i].first()->is_Register()) {
1348         const Register reg = in_regs[i].first()->as_Register();
1349         __ xorptr(reg, reg);
1350       } else if (in_regs[i].first()->is_XMMRegister()) {
1351         __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1352       } else if (in_regs[i].first()->is_FloatRegister()) {
1353         ShouldNotReachHere();
1354       } else if (in_regs[i].first()->is_stack()) {
1355         // Nothing to do
1356       } else {
1357         ShouldNotReachHere();
1358       }
1359       if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1360         i++;
1361       }
1362     }
1363 
1364     save_or_restore_arguments(masm, stack_slots, total_in_args,
1365                               arg_save_area, NULL, in_regs, in_sig_bt);
1366   }
1367 #endif
1368 }
1369 
1370 // Unpack an array argument into a pointer to the body and the length
1371 // if the array is non-null, otherwise pass 0 for both.
1372 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1373   Register tmp_reg = rax;
1374   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1375          "possible collision");
1376   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1377          "possible collision");
1378 
1379   // Pass the length, ptr pair
1380   Label is_null, done;
1381   VMRegPair tmp(tmp_reg->as_VMReg());
1382   if (reg.first()->is_stack()) {
1383     // Load the arg up from the stack
1384     simple_move32(masm, reg, tmp);
1385     reg = tmp;
1386   }
1387   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1388   __ jccb(Assembler::equal, is_null);
1389   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1390   simple_move32(masm, tmp, body_arg);
1391   // load the length relative to the body.
1392   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1393                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1394   simple_move32(masm, tmp, length_arg);
1395   __ jmpb(done);
1396   __ bind(is_null);
1397   // Pass zeros
1398   __ xorptr(tmp_reg, tmp_reg);
1399   simple_move32(masm, tmp, body_arg);
1400   simple_move32(masm, tmp, length_arg);
1401   __ bind(done);
1402 }
1403 
1404 static void verify_oop_args(MacroAssembler* masm,
1405                             methodHandle method,
1406                             const BasicType* sig_bt,
1407                             const VMRegPair* regs) {
1408   Register temp_reg = rbx;  // not part of any compiled calling seq
1409   if (VerifyOops) {
1410     for (int i = 0; i < method->size_of_parameters(); i++) {
1411       if (sig_bt[i] == T_OBJECT ||
1412           sig_bt[i] == T_ARRAY) {
1413         VMReg r = regs[i].first();
1414         assert(r->is_valid(), "bad oop arg");
1415         if (r->is_stack()) {
1416           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1417           __ verify_oop(temp_reg);
1418         } else {
1419           __ verify_oop(r->as_Register());
1420         }
1421       }
1422     }
1423   }
1424 }
1425 
1426 static void gen_special_dispatch(MacroAssembler* masm,
1427                                  methodHandle method,
1428                                  const BasicType* sig_bt,
1429                                  const VMRegPair* regs) {
1430   verify_oop_args(masm, method, sig_bt, regs);
1431   vmIntrinsics::ID iid = method->intrinsic_id();
1432 
1433   // Now write the args into the outgoing interpreter space
1434   bool     has_receiver   = false;
1435   Register receiver_reg   = noreg;
1436   int      member_arg_pos = -1;
1437   Register member_reg     = noreg;
1438   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1439   if (ref_kind != 0) {
1440     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1441     member_reg = rbx;  // known to be free at this point
1442     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1443   } else if (iid == vmIntrinsics::_invokeBasic) {
1444     has_receiver = true;
1445   } else {
1446     fatal("unexpected intrinsic id %d", iid);
1447   }
1448 
1449   if (member_reg != noreg) {
1450     // Load the member_arg into register, if necessary.
1451     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1452     VMReg r = regs[member_arg_pos].first();
1453     if (r->is_stack()) {
1454       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1455     } else {
1456       // no data motion is needed
1457       member_reg = r->as_Register();
1458     }
1459   }
1460 
1461   if (has_receiver) {
1462     // Make sure the receiver is loaded into a register.
1463     assert(method->size_of_parameters() > 0, "oob");
1464     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1465     VMReg r = regs[0].first();
1466     assert(r->is_valid(), "bad receiver arg");
1467     if (r->is_stack()) {
1468       // Porting note:  This assumes that compiled calling conventions always
1469       // pass the receiver oop in a register.  If this is not true on some
1470       // platform, pick a temp and load the receiver from stack.
1471       fatal("receiver always in a register");
1472       receiver_reg = rcx;  // known to be free at this point
1473       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1474     } else {
1475       // no data motion is needed
1476       receiver_reg = r->as_Register();
1477     }
1478   }
1479 
1480   // Figure out which address we are really jumping to:
1481   MethodHandles::generate_method_handle_dispatch(masm, iid,
1482                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1483 }
1484 
1485 // ---------------------------------------------------------------------------
1486 // Generate a native wrapper for a given method.  The method takes arguments
1487 // in the Java compiled code convention, marshals them to the native
1488 // convention (handlizes oops, etc), transitions to native, makes the call,
1489 // returns to java state (possibly blocking), unhandlizes any result and
1490 // returns.
1491 //
1492 // Critical native functions are a shorthand for the use of
1493 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1494 // functions.  The wrapper is expected to unpack the arguments before
1495 // passing them to the callee and perform checks before and after the
1496 // native call to ensure that they GCLocker
1497 // lock_critical/unlock_critical semantics are followed.  Some other
1498 // parts of JNI setup are skipped like the tear down of the JNI handle
1499 // block and the check for pending exceptions it's impossible for them
1500 // to be thrown.
1501 //
1502 // They are roughly structured like this:
1503 //    if (GCLocker::needs_gc())
1504 //      SharedRuntime::block_for_jni_critical();
1505 //    tranistion to thread_in_native
1506 //    unpack arrray arguments and call native entry point
1507 //    check for safepoint in progress
1508 //    check if any thread suspend flags are set
1509 //      call into JVM and possible unlock the JNI critical
1510 //      if a GC was suppressed while in the critical native.
1511 //    transition back to thread_in_Java
1512 //    return to caller
1513 //
1514 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1515                                                 const methodHandle& method,
1516                                                 int compile_id,
1517                                                 BasicType* in_sig_bt,
1518                                                 VMRegPair* in_regs,
1519                                                 BasicType ret_type) {
1520   if (method->is_method_handle_intrinsic()) {
1521     vmIntrinsics::ID iid = method->intrinsic_id();
1522     intptr_t start = (intptr_t)__ pc();
1523     int vep_offset = ((intptr_t)__ pc()) - start;
1524     gen_special_dispatch(masm,
1525                          method,
1526                          in_sig_bt,
1527                          in_regs);
1528     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1529     __ flush();
1530     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1531     return nmethod::new_native_nmethod(method,
1532                                        compile_id,
1533                                        masm->code(),
1534                                        vep_offset,
1535                                        frame_complete,
1536                                        stack_slots / VMRegImpl::slots_per_word,
1537                                        in_ByteSize(-1),
1538                                        in_ByteSize(-1),
1539                                        (OopMapSet*)NULL);
1540   }
1541   bool is_critical_native = true;
1542   address native_func = method->critical_native_function();
1543   if (native_func == NULL) {
1544     native_func = method->native_function();
1545     is_critical_native = false;
1546   }
1547   assert(native_func != NULL, "must have function");
1548 
1549   // An OopMap for lock (and class if static)
1550   OopMapSet *oop_maps = new OopMapSet();
1551 
1552   // We have received a description of where all the java arg are located
1553   // on entry to the wrapper. We need to convert these args to where
1554   // the jni function will expect them. To figure out where they go
1555   // we convert the java signature to a C signature by inserting
1556   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1557 
1558   const int total_in_args = method->size_of_parameters();
1559   int total_c_args = total_in_args;
1560   if (!is_critical_native) {
1561     total_c_args += 1;
1562     if (method->is_static()) {
1563       total_c_args++;
1564     }
1565   } else {
1566     for (int i = 0; i < total_in_args; i++) {
1567       if (in_sig_bt[i] == T_ARRAY) {
1568         total_c_args++;
1569       }
1570     }
1571   }
1572 
1573   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1574   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1575   BasicType* in_elem_bt = NULL;
1576 
1577   int argc = 0;
1578   if (!is_critical_native) {
1579     out_sig_bt[argc++] = T_ADDRESS;
1580     if (method->is_static()) {
1581       out_sig_bt[argc++] = T_OBJECT;
1582     }
1583 
1584     for (int i = 0; i < total_in_args ; i++ ) {
1585       out_sig_bt[argc++] = in_sig_bt[i];
1586     }
1587   } else {
1588     Thread* THREAD = Thread::current();
1589     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1590     SignatureStream ss(method->signature());
1591     for (int i = 0; i < total_in_args ; i++ ) {
1592       if (in_sig_bt[i] == T_ARRAY) {
1593         // Arrays are passed as int, elem* pair
1594         out_sig_bt[argc++] = T_INT;
1595         out_sig_bt[argc++] = T_ADDRESS;
1596         Symbol* atype = ss.as_symbol(CHECK_NULL);
1597         const char* at = atype->as_C_string();
1598         if (strlen(at) == 2) {
1599           assert(at[0] == '[', "must be");
1600           switch (at[1]) {
1601             case 'B': in_elem_bt[i]  = T_BYTE; break;
1602             case 'C': in_elem_bt[i]  = T_CHAR; break;
1603             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1604             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1605             case 'I': in_elem_bt[i]  = T_INT; break;
1606             case 'J': in_elem_bt[i]  = T_LONG; break;
1607             case 'S': in_elem_bt[i]  = T_SHORT; break;
1608             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1609             default: ShouldNotReachHere();
1610           }
1611         }
1612       } else {
1613         out_sig_bt[argc++] = in_sig_bt[i];
1614         in_elem_bt[i] = T_VOID;
1615       }
1616       if (in_sig_bt[i] != T_VOID) {
1617         assert(in_sig_bt[i] == ss.type(), "must match");
1618         ss.next();
1619       }
1620     }
1621   }
1622 
1623   // Now figure out where the args must be stored and how much stack space
1624   // they require.
1625   int out_arg_slots;
1626   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1627 
1628   // Compute framesize for the wrapper.  We need to handlize all oops in
1629   // registers a max of 2 on x86.
1630 
1631   // Calculate the total number of stack slots we will need.
1632 
1633   // First count the abi requirement plus all of the outgoing args
1634   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1635 
1636   // Now the space for the inbound oop handle area
1637   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1638   if (is_critical_native) {
1639     // Critical natives may have to call out so they need a save area
1640     // for register arguments.
1641     int double_slots = 0;
1642     int single_slots = 0;
1643     for ( int i = 0; i < total_in_args; i++) {
1644       if (in_regs[i].first()->is_Register()) {
1645         const Register reg = in_regs[i].first()->as_Register();
1646         switch (in_sig_bt[i]) {
1647           case T_ARRAY:  // critical array (uses 2 slots on LP64)
1648           case T_BOOLEAN:
1649           case T_BYTE:
1650           case T_SHORT:
1651           case T_CHAR:
1652           case T_INT:  single_slots++; break;
1653           case T_LONG: double_slots++; break;
1654           default:  ShouldNotReachHere();
1655         }
1656       } else if (in_regs[i].first()->is_XMMRegister()) {
1657         switch (in_sig_bt[i]) {
1658           case T_FLOAT:  single_slots++; break;
1659           case T_DOUBLE: double_slots++; break;
1660           default:  ShouldNotReachHere();
1661         }
1662       } else if (in_regs[i].first()->is_FloatRegister()) {
1663         ShouldNotReachHere();
1664       }
1665     }
1666     total_save_slots = double_slots * 2 + single_slots;
1667     // align the save area
1668     if (double_slots != 0) {
1669       stack_slots = round_to(stack_slots, 2);
1670     }
1671   }
1672 
1673   int oop_handle_offset = stack_slots;
1674   stack_slots += total_save_slots;
1675 
1676   // Now any space we need for handlizing a klass if static method
1677 
1678   int klass_slot_offset = 0;
1679   int klass_offset = -1;
1680   int lock_slot_offset = 0;
1681   bool is_static = false;
1682 
1683   if (method->is_static()) {
1684     klass_slot_offset = stack_slots;
1685     stack_slots += VMRegImpl::slots_per_word;
1686     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1687     is_static = true;
1688   }
1689 
1690   // Plus a lock if needed
1691 
1692   if (method->is_synchronized()) {
1693     lock_slot_offset = stack_slots;
1694     stack_slots += VMRegImpl::slots_per_word;
1695   }
1696 
1697   // Now a place (+2) to save return values or temp during shuffling
1698   // + 2 for return address (which we own) and saved rbp,
1699   stack_slots += 4;
1700 
1701   // Ok The space we have allocated will look like:
1702   //
1703   //
1704   // FP-> |                     |
1705   //      |---------------------|
1706   //      | 2 slots for moves   |
1707   //      |---------------------|
1708   //      | lock box (if sync)  |
1709   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1710   //      | klass (if static)   |
1711   //      |---------------------| <- klass_slot_offset
1712   //      | oopHandle area      |
1713   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1714   //      | outbound memory     |
1715   //      | based arguments     |
1716   //      |                     |
1717   //      |---------------------|
1718   //      |                     |
1719   // SP-> | out_preserved_slots |
1720   //
1721   //
1722   // ****************************************************************************
1723   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1724   // arguments off of the stack after the jni call. Before the call we can use
1725   // instructions that are SP relative. After the jni call we switch to FP
1726   // relative instructions instead of re-adjusting the stack on windows.
1727   // ****************************************************************************
1728 
1729 
1730   // Now compute actual number of stack words we need rounding to make
1731   // stack properly aligned.
1732   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1733 
1734   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1735 
1736   intptr_t start = (intptr_t)__ pc();
1737 
1738   // First thing make an ic check to see if we should even be here
1739 
1740   // We are free to use all registers as temps without saving them and
1741   // restoring them except rbp. rbp is the only callee save register
1742   // as far as the interpreter and the compiler(s) are concerned.
1743 
1744 
1745   const Register ic_reg = rax;
1746   const Register receiver = rcx;
1747   Label hit;
1748   Label exception_pending;
1749 
1750   __ verify_oop(receiver);
1751   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1752   __ jcc(Assembler::equal, hit);
1753 
1754   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1755 
1756   // verified entry must be aligned for code patching.
1757   // and the first 5 bytes must be in the same cache line
1758   // if we align at 8 then we will be sure 5 bytes are in the same line
1759   __ align(8);
1760 
1761   __ bind(hit);
1762 
1763   int vep_offset = ((intptr_t)__ pc()) - start;
1764 
1765 #ifdef COMPILER1
1766   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1767   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1768     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1769    }
1770 #endif // COMPILER1
1771 
1772   // The instruction at the verified entry point must be 5 bytes or longer
1773   // because it can be patched on the fly by make_non_entrant. The stack bang
1774   // instruction fits that requirement.
1775 
1776   // Generate stack overflow check
1777 
1778   if (UseStackBanging) {
1779     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
1780   } else {
1781     // need a 5 byte instruction to allow MT safe patching to non-entrant
1782     __ fat_nop();
1783   }
1784 
1785   // Generate a new frame for the wrapper.
1786   __ enter();
1787   // -2 because return address is already present and so is saved rbp
1788   __ subptr(rsp, stack_size - 2*wordSize);
1789 
1790   // Frame is now completed as far as size and linkage.
1791   int frame_complete = ((intptr_t)__ pc()) - start;
1792 
1793   if (UseRTMLocking) {
1794     // Abort RTM transaction before calling JNI
1795     // because critical section will be large and will be
1796     // aborted anyway. Also nmethod could be deoptimized.
1797     __ xabort(0);
1798   }
1799 
1800   // Calculate the difference between rsp and rbp,. We need to know it
1801   // after the native call because on windows Java Natives will pop
1802   // the arguments and it is painful to do rsp relative addressing
1803   // in a platform independent way. So after the call we switch to
1804   // rbp, relative addressing.
1805 
1806   int fp_adjustment = stack_size - 2*wordSize;
1807 
1808 #ifdef COMPILER2
1809   // C2 may leave the stack dirty if not in SSE2+ mode
1810   if (UseSSE >= 2) {
1811     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1812   } else {
1813     __ empty_FPU_stack();
1814   }
1815 #endif /* COMPILER2 */
1816 
1817   // Compute the rbp, offset for any slots used after the jni call
1818 
1819   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1820 
1821   // We use rdi as a thread pointer because it is callee save and
1822   // if we load it once it is usable thru the entire wrapper
1823   const Register thread = rdi;
1824 
1825   // We use rsi as the oop handle for the receiver/klass
1826   // It is callee save so it survives the call to native
1827 
1828   const Register oop_handle_reg = rsi;
1829 
1830   __ get_thread(thread);
1831 
1832   if (is_critical_native) {
1833     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1834                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1835   }
1836 
1837   //
1838   // We immediately shuffle the arguments so that any vm call we have to
1839   // make from here on out (sync slow path, jvmti, etc.) we will have
1840   // captured the oops from our caller and have a valid oopMap for
1841   // them.
1842 
1843   // -----------------
1844   // The Grand Shuffle
1845   //
1846   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1847   // and, if static, the class mirror instead of a receiver.  This pretty much
1848   // guarantees that register layout will not match (and x86 doesn't use reg
1849   // parms though amd does).  Since the native abi doesn't use register args
1850   // and the java conventions does we don't have to worry about collisions.
1851   // All of our moved are reg->stack or stack->stack.
1852   // We ignore the extra arguments during the shuffle and handle them at the
1853   // last moment. The shuffle is described by the two calling convention
1854   // vectors we have in our possession. We simply walk the java vector to
1855   // get the source locations and the c vector to get the destinations.
1856 
1857   int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
1858 
1859   // Record rsp-based slot for receiver on stack for non-static methods
1860   int receiver_offset = -1;
1861 
1862   // This is a trick. We double the stack slots so we can claim
1863   // the oops in the caller's frame. Since we are sure to have
1864   // more args than the caller doubling is enough to make
1865   // sure we can capture all the incoming oop args from the
1866   // caller.
1867   //
1868   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1869 
1870   // Mark location of rbp,
1871   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1872 
1873   // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1874   // Are free to temporaries if we have to do  stack to steck moves.
1875   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1876 
1877   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1878     switch (in_sig_bt[i]) {
1879       case T_ARRAY:
1880         if (is_critical_native) {
1881           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1882           c_arg++;
1883           break;
1884         }
1885       case T_OBJECT:
1886         assert(!is_critical_native, "no oop arguments");
1887         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1888                     ((i == 0) && (!is_static)),
1889                     &receiver_offset);
1890         break;
1891       case T_VOID:
1892         break;
1893 
1894       case T_FLOAT:
1895         float_move(masm, in_regs[i], out_regs[c_arg]);
1896           break;
1897 
1898       case T_DOUBLE:
1899         assert( i + 1 < total_in_args &&
1900                 in_sig_bt[i + 1] == T_VOID &&
1901                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1902         double_move(masm, in_regs[i], out_regs[c_arg]);
1903         break;
1904 
1905       case T_LONG :
1906         long_move(masm, in_regs[i], out_regs[c_arg]);
1907         break;
1908 
1909       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1910 
1911       default:
1912         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1913     }
1914   }
1915 
1916   // Pre-load a static method's oop into rsi.  Used both by locking code and
1917   // the normal JNI call code.
1918   if (method->is_static() && !is_critical_native) {
1919 
1920     //  load opp into a register
1921     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1922 
1923     // Now handlize the static class mirror it's known not-null.
1924     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1925     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1926 
1927     // Now get the handle
1928     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1929     // store the klass handle as second argument
1930     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1931   }
1932 
1933   // Change state to native (we save the return address in the thread, since it might not
1934   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
1935   // points into the right code segment. It does not have to be the correct return pc.
1936   // We use the same pc/oopMap repeatedly when we call out
1937 
1938   intptr_t the_pc = (intptr_t) __ pc();
1939   oop_maps->add_gc_map(the_pc - start, map);
1940 
1941   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1942 
1943 
1944   // We have all of the arguments setup at this point. We must not touch any register
1945   // argument registers at this point (what if we save/restore them there are no oop?
1946 
1947   {
1948     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1949     __ mov_metadata(rax, method());
1950     __ call_VM_leaf(
1951          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1952          thread, rax);
1953   }
1954 
1955   // RedefineClasses() tracing support for obsolete method entry
1956   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1957     __ mov_metadata(rax, method());
1958     __ call_VM_leaf(
1959          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1960          thread, rax);
1961   }
1962 
1963   // These are register definitions we need for locking/unlocking
1964   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1965   const Register obj_reg  = rcx;  // Will contain the oop
1966   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1967 
1968   Label slow_path_lock;
1969   Label lock_done;
1970 
1971   // Lock a synchronized method
1972   if (method->is_synchronized()) {
1973     assert(!is_critical_native, "unhandled");
1974 
1975 
1976     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1977 
1978     // Get the handle (the 2nd argument)
1979     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1980 
1981     // Get address of the box
1982 
1983     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1984 
1985     // Load the oop from the handle
1986     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1987 
1988     if (UseBiasedLocking) {
1989       // Note that oop_handle_reg is trashed during this call
1990       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
1991     }
1992 
1993     // Load immediate 1 into swap_reg %rax,
1994     __ movptr(swap_reg, 1);
1995 
1996     // Load (object->mark() | 1) into swap_reg %rax,
1997     __ orptr(swap_reg, Address(obj_reg, 0));
1998 
1999     // Save (object->mark() | 1) into BasicLock's displaced header
2000     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2001 
2002     if (os::is_MP()) {
2003       __ lock();
2004     }
2005 
2006     // src -> dest iff dest == rax, else rax, <- dest
2007     // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
2008     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2009     __ jcc(Assembler::equal, lock_done);
2010 
2011     // Test if the oopMark is an obvious stack pointer, i.e.,
2012     //  1) (mark & 3) == 0, and
2013     //  2) rsp <= mark < mark + os::pagesize()
2014     // These 3 tests can be done by evaluating the following
2015     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2016     // assuming both stack pointer and pagesize have their
2017     // least significant 2 bits clear.
2018     // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
2019 
2020     __ subptr(swap_reg, rsp);
2021     __ andptr(swap_reg, 3 - os::vm_page_size());
2022 
2023     // Save the test result, for recursive case, the result is zero
2024     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2025     __ jcc(Assembler::notEqual, slow_path_lock);
2026     // Slow path will re-enter here
2027     __ bind(lock_done);
2028 
2029     if (UseBiasedLocking) {
2030       // Re-fetch oop_handle_reg as we trashed it above
2031       __ movptr(oop_handle_reg, Address(rsp, wordSize));
2032     }
2033   }
2034 
2035 
2036   // Finally just about ready to make the JNI call
2037 
2038 
2039   // get JNIEnv* which is first argument to native
2040   if (!is_critical_native) {
2041     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
2042     __ movptr(Address(rsp, 0), rdx);
2043   }
2044 
2045   // Now set thread in native
2046   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
2047 
2048   __ call(RuntimeAddress(native_func));
2049 
2050   // Verify or restore cpu control state after JNI call
2051   __ restore_cpu_control_state_after_jni();
2052 
2053   // WARNING - on Windows Java Natives use pascal calling convention and pop the
2054   // arguments off of the stack. We could just re-adjust the stack pointer here
2055   // and continue to do SP relative addressing but we instead switch to FP
2056   // relative addressing.
2057 
2058   // Unpack native results.
2059   switch (ret_type) {
2060   case T_BOOLEAN: __ c2bool(rax);            break;
2061   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
2062   case T_BYTE   : __ sign_extend_byte (rax); break;
2063   case T_SHORT  : __ sign_extend_short(rax); break;
2064   case T_INT    : /* nothing to do */        break;
2065   case T_DOUBLE :
2066   case T_FLOAT  :
2067     // Result is in st0 we'll save as needed
2068     break;
2069   case T_ARRAY:                 // Really a handle
2070   case T_OBJECT:                // Really a handle
2071       break; // can't de-handlize until after safepoint check
2072   case T_VOID: break;
2073   case T_LONG: break;
2074   default       : ShouldNotReachHere();
2075   }
2076 
2077   // Switch thread to "native transition" state before reading the synchronization state.
2078   // This additional state is necessary because reading and testing the synchronization
2079   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2080   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2081   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2082   //     Thread A is resumed to finish this native method, but doesn't block here since it
2083   //     didn't see any synchronization is progress, and escapes.
2084   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2085 
2086   if(os::is_MP()) {
2087     if (UseMembar) {
2088       // Force this write out before the read below
2089       __ membar(Assembler::Membar_mask_bits(
2090            Assembler::LoadLoad | Assembler::LoadStore |
2091            Assembler::StoreLoad | Assembler::StoreStore));
2092     } else {
2093       // Write serialization page so VM thread can do a pseudo remote membar.
2094       // We use the current thread pointer to calculate a thread specific
2095       // offset to write to within the page. This minimizes bus traffic
2096       // due to cache line collision.
2097       __ serialize_memory(thread, rcx);
2098     }
2099   }
2100 
2101   if (AlwaysRestoreFPU) {
2102     // Make sure the control word is correct.
2103     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2104   }
2105 
2106   Label after_transition;
2107 
2108   // check for safepoint operation in progress and/or pending suspend requests
2109   { Label Continue;
2110 
2111     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2112              SafepointSynchronize::_not_synchronized);
2113 
2114     Label L;
2115     __ jcc(Assembler::notEqual, L);
2116     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2117     __ jcc(Assembler::equal, Continue);
2118     __ bind(L);
2119 
2120     // Don't use call_VM as it will see a possible pending exception and forward it
2121     // and never return here preventing us from clearing _last_native_pc down below.
2122     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2123     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2124     // by hand.
2125     //
2126     save_native_result(masm, ret_type, stack_slots);
2127     __ push(thread);
2128     if (!is_critical_native) {
2129       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2130                                               JavaThread::check_special_condition_for_native_trans)));
2131     } else {
2132       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2133                                               JavaThread::check_special_condition_for_native_trans_and_transition)));
2134     }
2135     __ increment(rsp, wordSize);
2136     // Restore any method result value
2137     restore_native_result(masm, ret_type, stack_slots);
2138 
2139     if (is_critical_native) {
2140       // The call above performed the transition to thread_in_Java so
2141       // skip the transition logic below.
2142       __ jmpb(after_transition);
2143     }
2144 
2145     __ bind(Continue);
2146   }
2147 
2148   // change thread state
2149   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
2150   __ bind(after_transition);
2151 
2152   Label reguard;
2153   Label reguard_done;
2154   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2155   __ jcc(Assembler::equal, reguard);
2156 
2157   // slow path reguard  re-enters here
2158   __ bind(reguard_done);
2159 
2160   // Handle possible exception (will unlock if necessary)
2161 
2162   // native result if any is live
2163 
2164   // Unlock
2165   Label slow_path_unlock;
2166   Label unlock_done;
2167   if (method->is_synchronized()) {
2168 
2169     Label done;
2170 
2171     // Get locked oop from the handle we passed to jni
2172     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2173 
2174     if (UseBiasedLocking) {
2175       __ biased_locking_exit(obj_reg, rbx, done);
2176     }
2177 
2178     // Simple recursive lock?
2179 
2180     __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2181     __ jcc(Assembler::equal, done);
2182 
2183     // Must save rax, if if it is live now because cmpxchg must use it
2184     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2185       save_native_result(masm, ret_type, stack_slots);
2186     }
2187 
2188     //  get old displaced header
2189     __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2190 
2191     // get address of the stack lock
2192     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2193 
2194     // Atomic swap old header if oop still contains the stack lock
2195     if (os::is_MP()) {
2196     __ lock();
2197     }
2198 
2199     // src -> dest iff dest == rax, else rax, <- dest
2200     // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2201     __ cmpxchgptr(rbx, Address(obj_reg, 0));
2202     __ jcc(Assembler::notEqual, slow_path_unlock);
2203 
2204     // slow path re-enters here
2205     __ bind(unlock_done);
2206     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2207       restore_native_result(masm, ret_type, stack_slots);
2208     }
2209 
2210     __ bind(done);
2211 
2212   }
2213 
2214   {
2215     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2216     // Tell dtrace about this method exit
2217     save_native_result(masm, ret_type, stack_slots);
2218     __ mov_metadata(rax, method());
2219     __ call_VM_leaf(
2220          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2221          thread, rax);
2222     restore_native_result(masm, ret_type, stack_slots);
2223   }
2224 
2225   // We can finally stop using that last_Java_frame we setup ages ago
2226 
2227   __ reset_last_Java_frame(thread, false);
2228 
2229   // Unpack oop result
2230   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2231       Label L;
2232       __ cmpptr(rax, (int32_t)NULL_WORD);
2233       __ jcc(Assembler::equal, L);
2234       __ movptr(rax, Address(rax, 0));
2235       __ bind(L);
2236       __ verify_oop(rax);
2237   }
2238 
2239   if (CheckJNICalls) {
2240     // clear_pending_jni_exception_check
2241     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2242   }
2243 
2244   if (!is_critical_native) {
2245     // reset handle block
2246     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
2247     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
2248 
2249     // Any exception pending?
2250     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2251     __ jcc(Assembler::notEqual, exception_pending);
2252   }
2253 
2254   // no exception, we're almost done
2255 
2256   // check that only result value is on FPU stack
2257   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2258 
2259   // Fixup floating pointer results so that result looks like a return from a compiled method
2260   if (ret_type == T_FLOAT) {
2261     if (UseSSE >= 1) {
2262       // Pop st0 and store as float and reload into xmm register
2263       __ fstp_s(Address(rbp, -4));
2264       __ movflt(xmm0, Address(rbp, -4));
2265     }
2266   } else if (ret_type == T_DOUBLE) {
2267     if (UseSSE >= 2) {
2268       // Pop st0 and store as double and reload into xmm register
2269       __ fstp_d(Address(rbp, -8));
2270       __ movdbl(xmm0, Address(rbp, -8));
2271     }
2272   }
2273 
2274   // Return
2275 
2276   __ leave();
2277   __ ret(0);
2278 
2279   // Unexpected paths are out of line and go here
2280 
2281   // Slow path locking & unlocking
2282   if (method->is_synchronized()) {
2283 
2284     // BEGIN Slow path lock
2285 
2286     __ bind(slow_path_lock);
2287 
2288     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2289     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2290     __ push(thread);
2291     __ push(lock_reg);
2292     __ push(obj_reg);
2293     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2294     __ addptr(rsp, 3*wordSize);
2295 
2296 #ifdef ASSERT
2297     { Label L;
2298     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2299     __ jcc(Assembler::equal, L);
2300     __ stop("no pending exception allowed on exit from monitorenter");
2301     __ bind(L);
2302     }
2303 #endif
2304     __ jmp(lock_done);
2305 
2306     // END Slow path lock
2307 
2308     // BEGIN Slow path unlock
2309     __ bind(slow_path_unlock);
2310 
2311     // Slow path unlock
2312 
2313     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2314       save_native_result(masm, ret_type, stack_slots);
2315     }
2316     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2317 
2318     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2319     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2320 
2321 
2322     // should be a peal
2323     // +wordSize because of the push above
2324     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2325     __ push(thread);
2326     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2327     __ push(rax);
2328 
2329     __ push(obj_reg);
2330     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2331     __ addptr(rsp, 3*wordSize);
2332 #ifdef ASSERT
2333     {
2334       Label L;
2335       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2336       __ jcc(Assembler::equal, L);
2337       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2338       __ bind(L);
2339     }
2340 #endif /* ASSERT */
2341 
2342     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2343 
2344     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2345       restore_native_result(masm, ret_type, stack_slots);
2346     }
2347     __ jmp(unlock_done);
2348     // END Slow path unlock
2349 
2350   }
2351 
2352   // SLOW PATH Reguard the stack if needed
2353 
2354   __ bind(reguard);
2355   save_native_result(masm, ret_type, stack_slots);
2356   {
2357     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2358   }
2359   restore_native_result(masm, ret_type, stack_slots);
2360   __ jmp(reguard_done);
2361 
2362 
2363   // BEGIN EXCEPTION PROCESSING
2364 
2365   if (!is_critical_native) {
2366     // Forward  the exception
2367     __ bind(exception_pending);
2368 
2369     // remove possible return value from FPU register stack
2370     __ empty_FPU_stack();
2371 
2372     // pop our frame
2373     __ leave();
2374     // and forward the exception
2375     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2376   }
2377 
2378   __ flush();
2379 
2380   nmethod *nm = nmethod::new_native_nmethod(method,
2381                                             compile_id,
2382                                             masm->code(),
2383                                             vep_offset,
2384                                             frame_complete,
2385                                             stack_slots / VMRegImpl::slots_per_word,
2386                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2387                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2388                                             oop_maps);
2389 
2390   if (is_critical_native) {
2391     nm->set_lazy_critical_native(true);
2392   }
2393 
2394   return nm;
2395 
2396 }
2397 
2398 // this function returns the adjust size (in number of words) to a c2i adapter
2399 // activation for use during deoptimization
2400 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2401   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2402 }
2403 
2404 
2405 uint SharedRuntime::out_preserve_stack_slots() {
2406   return 0;
2407 }
2408 
2409 //------------------------------generate_deopt_blob----------------------------
2410 void SharedRuntime::generate_deopt_blob() {
2411   // allocate space for the code
2412   ResourceMark rm;
2413   // setup code generation tools
2414   // note: the buffer code size must account for StackShadowPages=50
2415   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2416   MacroAssembler* masm = new MacroAssembler(&buffer);
2417   int frame_size_in_words;
2418   OopMap* map = NULL;
2419   // Account for the extra args we place on the stack
2420   // by the time we call fetch_unroll_info
2421   const int additional_words = 2; // deopt kind, thread
2422 
2423   OopMapSet *oop_maps = new OopMapSet();
2424 
2425   // -------------
2426   // This code enters when returning to a de-optimized nmethod.  A return
2427   // address has been pushed on the the stack, and return values are in
2428   // registers.
2429   // If we are doing a normal deopt then we were called from the patched
2430   // nmethod from the point we returned to the nmethod. So the return
2431   // address on the stack is wrong by NativeCall::instruction_size
2432   // We will adjust the value to it looks like we have the original return
2433   // address on the stack (like when we eagerly deoptimized).
2434   // In the case of an exception pending with deoptimized then we enter
2435   // with a return address on the stack that points after the call we patched
2436   // into the exception handler. We have the following register state:
2437   //    rax,: exception
2438   //    rbx,: exception handler
2439   //    rdx: throwing pc
2440   // So in this case we simply jam rdx into the useless return address and
2441   // the stack looks just like we want.
2442   //
2443   // At this point we need to de-opt.  We save the argument return
2444   // registers.  We call the first C routine, fetch_unroll_info().  This
2445   // routine captures the return values and returns a structure which
2446   // describes the current frame size and the sizes of all replacement frames.
2447   // The current frame is compiled code and may contain many inlined
2448   // functions, each with their own JVM state.  We pop the current frame, then
2449   // push all the new frames.  Then we call the C routine unpack_frames() to
2450   // populate these frames.  Finally unpack_frames() returns us the new target
2451   // address.  Notice that callee-save registers are BLOWN here; they have
2452   // already been captured in the vframeArray at the time the return PC was
2453   // patched.
2454   address start = __ pc();
2455   Label cont;
2456 
2457   // Prolog for non exception case!
2458 
2459   // Save everything in sight.
2460 
2461   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2462   // Normal deoptimization
2463   __ push(Deoptimization::Unpack_deopt);
2464   __ jmp(cont);
2465 
2466   int reexecute_offset = __ pc() - start;
2467 
2468   // Reexecute case
2469   // return address is the pc describes what bci to do re-execute at
2470 
2471   // No need to update map as each call to save_live_registers will produce identical oopmap
2472   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2473 
2474   __ push(Deoptimization::Unpack_reexecute);
2475   __ jmp(cont);
2476 
2477   int exception_offset = __ pc() - start;
2478 
2479   // Prolog for exception case
2480 
2481   // all registers are dead at this entry point, except for rax, and
2482   // rdx which contain the exception oop and exception pc
2483   // respectively.  Set them in TLS and fall thru to the
2484   // unpack_with_exception_in_tls entry point.
2485 
2486   __ get_thread(rdi);
2487   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2488   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2489 
2490   int exception_in_tls_offset = __ pc() - start;
2491 
2492   // new implementation because exception oop is now passed in JavaThread
2493 
2494   // Prolog for exception case
2495   // All registers must be preserved because they might be used by LinearScan
2496   // Exceptiop oop and throwing PC are passed in JavaThread
2497   // tos: stack at point of call to method that threw the exception (i.e. only
2498   // args are on the stack, no return address)
2499 
2500   // make room on stack for the return address
2501   // It will be patched later with the throwing pc. The correct value is not
2502   // available now because loading it from memory would destroy registers.
2503   __ push(0);
2504 
2505   // Save everything in sight.
2506 
2507   // No need to update map as each call to save_live_registers will produce identical oopmap
2508   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2509 
2510   // Now it is safe to overwrite any register
2511 
2512   // store the correct deoptimization type
2513   __ push(Deoptimization::Unpack_exception);
2514 
2515   // load throwing pc from JavaThread and patch it as the return address
2516   // of the current frame. Then clear the field in JavaThread
2517   __ get_thread(rdi);
2518   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2519   __ movptr(Address(rbp, wordSize), rdx);
2520   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2521 
2522 #ifdef ASSERT
2523   // verify that there is really an exception oop in JavaThread
2524   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2525   __ verify_oop(rax);
2526 
2527   // verify that there is no pending exception
2528   Label no_pending_exception;
2529   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2530   __ testptr(rax, rax);
2531   __ jcc(Assembler::zero, no_pending_exception);
2532   __ stop("must not have pending exception here");
2533   __ bind(no_pending_exception);
2534 #endif
2535 
2536   __ bind(cont);
2537 
2538   // Compiled code leaves the floating point stack dirty, empty it.
2539   __ empty_FPU_stack();
2540 
2541 
2542   // Call C code.  Need thread and this frame, but NOT official VM entry
2543   // crud.  We cannot block on this call, no GC can happen.
2544   __ get_thread(rcx);
2545   __ push(rcx);
2546   // fetch_unroll_info needs to call last_java_frame()
2547   __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2548 
2549   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2550 
2551   // Need to have an oopmap that tells fetch_unroll_info where to
2552   // find any register it might need.
2553 
2554   oop_maps->add_gc_map( __ pc()-start, map);
2555 
2556   // Discard args to fetch_unroll_info
2557   __ pop(rcx);
2558   __ pop(rcx);
2559 
2560   __ get_thread(rcx);
2561   __ reset_last_Java_frame(rcx, false);
2562 
2563   // Load UnrollBlock into EDI
2564   __ mov(rdi, rax);
2565 
2566   // Move the unpack kind to a safe place in the UnrollBlock because
2567   // we are very short of registers
2568 
2569   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2570   // retrieve the deopt kind from the UnrollBlock.
2571   __ movl(rax, unpack_kind);
2572 
2573    Label noException;
2574   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2575   __ jcc(Assembler::notEqual, noException);
2576   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2577   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2578   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2579   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2580 
2581   __ verify_oop(rax);
2582 
2583   // Overwrite the result registers with the exception results.
2584   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2585   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2586 
2587   __ bind(noException);
2588 
2589   // Stack is back to only having register save data on the stack.
2590   // Now restore the result registers. Everything else is either dead or captured
2591   // in the vframeArray.
2592 
2593   RegisterSaver::restore_result_registers(masm);
2594 
2595   // Non standard control word may be leaked out through a safepoint blob, and we can
2596   // deopt at a poll point with the non standard control word. However, we should make
2597   // sure the control word is correct after restore_result_registers.
2598   __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2599 
2600   // All of the register save area has been popped of the stack. Only the
2601   // return address remains.
2602 
2603   // Pop all the frames we must move/replace.
2604   //
2605   // Frame picture (youngest to oldest)
2606   // 1: self-frame (no frame link)
2607   // 2: deopting frame  (no frame link)
2608   // 3: caller of deopting frame (could be compiled/interpreted).
2609   //
2610   // Note: by leaving the return address of self-frame on the stack
2611   // and using the size of frame 2 to adjust the stack
2612   // when we are done the return to frame 3 will still be on the stack.
2613 
2614   // Pop deoptimized frame
2615   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2616 
2617   // sp should be pointing at the return address to the caller (3)
2618 
2619   // Pick up the initial fp we should save
2620   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2621   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2622 
2623 #ifdef ASSERT
2624   // Compilers generate code that bang the stack by as much as the
2625   // interpreter would need. So this stack banging should never
2626   // trigger a fault. Verify that it does not on non product builds.
2627   if (UseStackBanging) {
2628     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2629     __ bang_stack_size(rbx, rcx);
2630   }
2631 #endif
2632 
2633   // Load array of frame pcs into ECX
2634   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2635 
2636   __ pop(rsi); // trash the old pc
2637 
2638   // Load array of frame sizes into ESI
2639   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2640 
2641   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2642 
2643   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2644   __ movl(counter, rbx);
2645 
2646   // Now adjust the caller's stack to make up for the extra locals
2647   // but record the original sp so that we can save it in the skeletal interpreter
2648   // frame and the stack walking of interpreter_sender will get the unextended sp
2649   // value and not the "real" sp value.
2650 
2651   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2652   __ movptr(sp_temp, rsp);
2653   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2654   __ subptr(rsp, rbx);
2655 
2656   // Push interpreter frames in a loop
2657   Label loop;
2658   __ bind(loop);
2659   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2660   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2661   __ pushptr(Address(rcx, 0));          // save return address
2662   __ enter();                           // save old & set new rbp,
2663   __ subptr(rsp, rbx);                  // Prolog!
2664   __ movptr(rbx, sp_temp);              // sender's sp
2665   // This value is corrected by layout_activation_impl
2666   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2667   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2668   __ movptr(sp_temp, rsp);              // pass to next frame
2669   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2670   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2671   __ decrementl(counter);             // decrement counter
2672   __ jcc(Assembler::notZero, loop);
2673   __ pushptr(Address(rcx, 0));          // save final return address
2674 
2675   // Re-push self-frame
2676   __ enter();                           // save old & set new rbp,
2677 
2678   //  Return address and rbp, are in place
2679   // We'll push additional args later. Just allocate a full sized
2680   // register save area
2681   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2682 
2683   // Restore frame locals after moving the frame
2684   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2685   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2686   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2687   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2688   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2689 
2690   // Set up the args to unpack_frame
2691 
2692   __ pushl(unpack_kind);                     // get the unpack_kind value
2693   __ get_thread(rcx);
2694   __ push(rcx);
2695 
2696   // set last_Java_sp, last_Java_fp
2697   __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2698 
2699   // Call C code.  Need thread but NOT official VM entry
2700   // crud.  We cannot block on this call, no GC can happen.  Call should
2701   // restore return values to their stack-slots with the new SP.
2702   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2703   // Set an oopmap for the call site
2704   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2705 
2706   // rax, contains the return result type
2707   __ push(rax);
2708 
2709   __ get_thread(rcx);
2710   __ reset_last_Java_frame(rcx, false);
2711 
2712   // Collect return values
2713   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2714   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2715 
2716   // Clear floating point stack before returning to interpreter
2717   __ empty_FPU_stack();
2718 
2719   // Check if we should push the float or double return value.
2720   Label results_done, yes_double_value;
2721   __ cmpl(Address(rsp, 0), T_DOUBLE);
2722   __ jcc (Assembler::zero, yes_double_value);
2723   __ cmpl(Address(rsp, 0), T_FLOAT);
2724   __ jcc (Assembler::notZero, results_done);
2725 
2726   // return float value as expected by interpreter
2727   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2728   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2729   __ jmp(results_done);
2730 
2731   // return double value as expected by interpreter
2732   __ bind(yes_double_value);
2733   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2734   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2735 
2736   __ bind(results_done);
2737 
2738   // Pop self-frame.
2739   __ leave();                              // Epilog!
2740 
2741   // Jump to interpreter
2742   __ ret(0);
2743 
2744   // -------------
2745   // make sure all code is generated
2746   masm->flush();
2747 
2748   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2749   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2750 }
2751 
2752 
2753 #ifdef COMPILER2
2754 //------------------------------generate_uncommon_trap_blob--------------------
2755 void SharedRuntime::generate_uncommon_trap_blob() {
2756   // allocate space for the code
2757   ResourceMark rm;
2758   // setup code generation tools
2759   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2760   MacroAssembler* masm = new MacroAssembler(&buffer);
2761 
2762   enum frame_layout {
2763     arg0_off,      // thread                     sp + 0 // Arg location for
2764     arg1_off,      // unloaded_class_index       sp + 1 // calling C
2765     arg2_off,      // exec_mode                  sp + 2
2766     // The frame sender code expects that rbp will be in the "natural" place and
2767     // will override any oopMap setting for it. We must therefore force the layout
2768     // so that it agrees with the frame sender code.
2769     rbp_off,       // callee saved register      sp + 3
2770     return_off,    // slot for return address    sp + 4
2771     framesize
2772   };
2773 
2774   address start = __ pc();
2775 
2776   if (UseRTMLocking) {
2777     // Abort RTM transaction before possible nmethod deoptimization.
2778     __ xabort(0);
2779   }
2780 
2781   // Push self-frame.
2782   __ subptr(rsp, return_off*wordSize);     // Epilog!
2783 
2784   // rbp, is an implicitly saved callee saved register (i.e. the calling
2785   // convention will save restore it in prolog/epilog) Other than that
2786   // there are no callee save registers no that adapter frames are gone.
2787   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2788 
2789   // Clear the floating point exception stack
2790   __ empty_FPU_stack();
2791 
2792   // set last_Java_sp
2793   __ get_thread(rdx);
2794   __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2795 
2796   // Call C code.  Need thread but NOT official VM entry
2797   // crud.  We cannot block on this call, no GC can happen.  Call should
2798   // capture callee-saved registers as well as return values.
2799   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2800   // argument already in ECX
2801   __ movl(Address(rsp, arg1_off*wordSize),rcx);
2802   __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2803   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2804 
2805   // Set an oopmap for the call site
2806   OopMapSet *oop_maps = new OopMapSet();
2807   OopMap* map =  new OopMap( framesize, 0 );
2808   // No oopMap for rbp, it is known implicitly
2809 
2810   oop_maps->add_gc_map( __ pc()-start, map);
2811 
2812   __ get_thread(rcx);
2813 
2814   __ reset_last_Java_frame(rcx, false);
2815 
2816   // Load UnrollBlock into EDI
2817   __ movptr(rdi, rax);
2818 
2819 #ifdef ASSERT
2820   { Label L;
2821     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2822             (int32_t)Deoptimization::Unpack_uncommon_trap);
2823     __ jcc(Assembler::equal, L);
2824     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
2825     __ bind(L);
2826   }
2827 #endif
2828 
2829   // Pop all the frames we must move/replace.
2830   //
2831   // Frame picture (youngest to oldest)
2832   // 1: self-frame (no frame link)
2833   // 2: deopting frame  (no frame link)
2834   // 3: caller of deopting frame (could be compiled/interpreted).
2835 
2836   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2837   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2838 
2839   // Pop deoptimized frame
2840   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2841   __ addptr(rsp, rcx);
2842 
2843   // sp should be pointing at the return address to the caller (3)
2844 
2845   // Pick up the initial fp we should save
2846   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2847   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2848 
2849 #ifdef ASSERT
2850   // Compilers generate code that bang the stack by as much as the
2851   // interpreter would need. So this stack banging should never
2852   // trigger a fault. Verify that it does not on non product builds.
2853   if (UseStackBanging) {
2854     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2855     __ bang_stack_size(rbx, rcx);
2856   }
2857 #endif
2858 
2859   // Load array of frame pcs into ECX
2860   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2861 
2862   __ pop(rsi); // trash the pc
2863 
2864   // Load array of frame sizes into ESI
2865   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2866 
2867   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2868 
2869   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2870   __ movl(counter, rbx);
2871 
2872   // Now adjust the caller's stack to make up for the extra locals
2873   // but record the original sp so that we can save it in the skeletal interpreter
2874   // frame and the stack walking of interpreter_sender will get the unextended sp
2875   // value and not the "real" sp value.
2876 
2877   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2878   __ movptr(sp_temp, rsp);
2879   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2880   __ subptr(rsp, rbx);
2881 
2882   // Push interpreter frames in a loop
2883   Label loop;
2884   __ bind(loop);
2885   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2886   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2887   __ pushptr(Address(rcx, 0));          // save return address
2888   __ enter();                           // save old & set new rbp,
2889   __ subptr(rsp, rbx);                  // Prolog!
2890   __ movptr(rbx, sp_temp);              // sender's sp
2891   // This value is corrected by layout_activation_impl
2892   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2893   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2894   __ movptr(sp_temp, rsp);              // pass to next frame
2895   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2896   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2897   __ decrementl(counter);             // decrement counter
2898   __ jcc(Assembler::notZero, loop);
2899   __ pushptr(Address(rcx, 0));            // save final return address
2900 
2901   // Re-push self-frame
2902   __ enter();                           // save old & set new rbp,
2903   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
2904 
2905 
2906   // set last_Java_sp, last_Java_fp
2907   __ get_thread(rdi);
2908   __ set_last_Java_frame(rdi, noreg, rbp, NULL);
2909 
2910   // Call C code.  Need thread but NOT official VM entry
2911   // crud.  We cannot block on this call, no GC can happen.  Call should
2912   // restore return values to their stack-slots with the new SP.
2913   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2914   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2915   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2916   // Set an oopmap for the call site
2917   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2918 
2919   __ get_thread(rdi);
2920   __ reset_last_Java_frame(rdi, true);
2921 
2922   // Pop self-frame.
2923   __ leave();     // Epilog!
2924 
2925   // Jump to interpreter
2926   __ ret(0);
2927 
2928   // -------------
2929   // make sure all code is generated
2930   masm->flush();
2931 
2932    _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2933 }
2934 #endif // COMPILER2
2935 
2936 //------------------------------generate_handler_blob------
2937 //
2938 // Generate a special Compile2Runtime blob that saves all registers,
2939 // setup oopmap, and calls safepoint code to stop the compiled code for
2940 // a safepoint.
2941 //
2942 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2943 
2944   // Account for thread arg in our frame
2945   const int additional_words = 1;
2946   int frame_size_in_words;
2947 
2948   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2949 
2950   ResourceMark rm;
2951   OopMapSet *oop_maps = new OopMapSet();
2952   OopMap* map;
2953 
2954   // allocate space for the code
2955   // setup code generation tools
2956   CodeBuffer   buffer("handler_blob", 1024, 512);
2957   MacroAssembler* masm = new MacroAssembler(&buffer);
2958 
2959   const Register java_thread = rdi; // callee-saved for VC++
2960   address start   = __ pc();
2961   address call_pc = NULL;
2962   bool cause_return = (poll_type == POLL_AT_RETURN);
2963   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2964 
2965   if (UseRTMLocking) {
2966     // Abort RTM transaction before calling runtime
2967     // because critical section will be large and will be
2968     // aborted anyway. Also nmethod could be deoptimized.
2969     __ xabort(0);
2970   }
2971 
2972   // If cause_return is true we are at a poll_return and there is
2973   // the return address on the stack to the caller on the nmethod
2974   // that is safepoint. We can leave this return on the stack and
2975   // effectively complete the return and safepoint in the caller.
2976   // Otherwise we push space for a return address that the safepoint
2977   // handler will install later to make the stack walking sensible.
2978   if (!cause_return)
2979     __ push(rbx);  // Make room for return address (or push it again)
2980 
2981   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2982 
2983   // The following is basically a call_VM. However, we need the precise
2984   // address of the call in order to generate an oopmap. Hence, we do all the
2985   // work ourselves.
2986 
2987   // Push thread argument and setup last_Java_sp
2988   __ get_thread(java_thread);
2989   __ push(java_thread);
2990   __ set_last_Java_frame(java_thread, noreg, noreg, NULL);
2991 
2992   // if this was not a poll_return then we need to correct the return address now.
2993   if (!cause_return) {
2994     __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2995     __ movptr(Address(rbp, wordSize), rax);
2996   }
2997 
2998   // do the call
2999   __ call(RuntimeAddress(call_ptr));
3000 
3001   // Set an oopmap for the call site.  This oopmap will map all
3002   // oop-registers and debug-info registers as callee-saved.  This
3003   // will allow deoptimization at this safepoint to find all possible
3004   // debug-info recordings, as well as let GC find all oops.
3005 
3006   oop_maps->add_gc_map( __ pc() - start, map);
3007 
3008   // Discard arg
3009   __ pop(rcx);
3010 
3011   Label noException;
3012 
3013   // Clear last_Java_sp again
3014   __ get_thread(java_thread);
3015   __ reset_last_Java_frame(java_thread, false);
3016 
3017   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3018   __ jcc(Assembler::equal, noException);
3019 
3020   // Exception pending
3021   RegisterSaver::restore_live_registers(masm, save_vectors);
3022 
3023   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3024 
3025   __ bind(noException);
3026 
3027   // Normal exit, register restoring and exit
3028   RegisterSaver::restore_live_registers(masm, save_vectors);
3029 
3030   __ ret(0);
3031 
3032   // make sure all code is generated
3033   masm->flush();
3034 
3035   // Fill-out other meta info
3036   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3037 }
3038 
3039 //
3040 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3041 //
3042 // Generate a stub that calls into vm to find out the proper destination
3043 // of a java call. All the argument registers are live at this point
3044 // but since this is generic code we don't know what they are and the caller
3045 // must do any gc of the args.
3046 //
3047 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3048   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3049 
3050   // allocate space for the code
3051   ResourceMark rm;
3052 
3053   CodeBuffer buffer(name, 1000, 512);
3054   MacroAssembler* masm                = new MacroAssembler(&buffer);
3055 
3056   int frame_size_words;
3057   enum frame_layout {
3058                 thread_off,
3059                 extra_words };
3060 
3061   OopMapSet *oop_maps = new OopMapSet();
3062   OopMap* map = NULL;
3063 
3064   int start = __ offset();
3065 
3066   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
3067 
3068   int frame_complete = __ offset();
3069 
3070   const Register thread = rdi;
3071   __ get_thread(rdi);
3072 
3073   __ push(thread);
3074   __ set_last_Java_frame(thread, noreg, rbp, NULL);
3075 
3076   __ call(RuntimeAddress(destination));
3077 
3078 
3079   // Set an oopmap for the call site.
3080   // We need this not only for callee-saved registers, but also for volatile
3081   // registers that the compiler might be keeping live across a safepoint.
3082 
3083   oop_maps->add_gc_map( __ offset() - start, map);
3084 
3085   // rax, contains the address we are going to jump to assuming no exception got installed
3086 
3087   __ addptr(rsp, wordSize);
3088 
3089   // clear last_Java_sp
3090   __ reset_last_Java_frame(thread, true);
3091   // check for pending exceptions
3092   Label pending;
3093   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3094   __ jcc(Assembler::notEqual, pending);
3095 
3096   // get the returned Method*
3097   __ get_vm_result_2(rbx, thread);
3098   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
3099 
3100   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
3101 
3102   RegisterSaver::restore_live_registers(masm);
3103 
3104   // We are back the the original state on entry and ready to go.
3105 
3106   __ jmp(rax);
3107 
3108   // Pending exception after the safepoint
3109 
3110   __ bind(pending);
3111 
3112   RegisterSaver::restore_live_registers(masm);
3113 
3114   // exception pending => remove activation and forward to exception handler
3115 
3116   __ get_thread(thread);
3117   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
3118   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
3119   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3120 
3121   // -------------
3122   // make sure all code is generated
3123   masm->flush();
3124 
3125   // return the  blob
3126   // frame_size_words or bytes??
3127   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3128 }