1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/compiledICHolder.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/vframeArray.hpp"
  37 #include "utilities/align.hpp"
  38 #include "vmreg_x86.inline.hpp"
  39 #ifdef COMPILER1
  40 #include "c1/c1_Runtime1.hpp"
  41 #endif
  42 #ifdef COMPILER2
  43 #include "opto/runtime.hpp"
  44 #endif
  45 #include "vm_version_x86.hpp"
  46 
  47 #define __ masm->
  48 
  49 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  50 
  51 class RegisterSaver {
  52   // Capture info about frame layout
  53 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  54   enum layout {
  55                 fpu_state_off = 0,
  56                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  57                 st0_off, st0H_off,
  58                 st1_off, st1H_off,
  59                 st2_off, st2H_off,
  60                 st3_off, st3H_off,
  61                 st4_off, st4H_off,
  62                 st5_off, st5H_off,
  63                 st6_off, st6H_off,
  64                 st7_off, st7H_off,
  65                 xmm_off,
  66                 DEF_XMM_OFFS(0),
  67                 DEF_XMM_OFFS(1),
  68                 DEF_XMM_OFFS(2),
  69                 DEF_XMM_OFFS(3),
  70                 DEF_XMM_OFFS(4),
  71                 DEF_XMM_OFFS(5),
  72                 DEF_XMM_OFFS(6),
  73                 DEF_XMM_OFFS(7),
  74                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  75                 rdi_off,
  76                 rsi_off,
  77                 ignore_off,  // extra copy of rbp,
  78                 rsp_off,
  79                 rbx_off,
  80                 rdx_off,
  81                 rcx_off,
  82                 rax_off,
  83                 // The frame sender code expects that rbp will be in the "natural" place and
  84                 // will override any oopMap setting for it. We must therefore force the layout
  85                 // so that it agrees with the frame sender code.
  86                 rbp_off,
  87                 return_off,      // slot for return address
  88                 reg_save_size };
  89   enum { FPU_regs_live = flags_off - fpu_state_end };
  90 
  91   public:
  92 
  93   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
  94                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  95   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
  96 
  97   static int rax_offset() { return rax_off; }
  98   static int rbx_offset() { return rbx_off; }
  99 
 100   // Offsets into the register save area
 101   // Used by deoptimization when it is managing result register
 102   // values on its own
 103 
 104   static int raxOffset(void) { return rax_off; }
 105   static int rdxOffset(void) { return rdx_off; }
 106   static int rbxOffset(void) { return rbx_off; }
 107   static int xmm0Offset(void) { return xmm0_off; }
 108   // This really returns a slot in the fp save area, which one is not important
 109   static int fpResultOffset(void) { return st0_off; }
 110 
 111   // During deoptimization only the result register need to be restored
 112   // all the other values have already been extracted.
 113 
 114   static void restore_result_registers(MacroAssembler* masm);
 115 
 116 };
 117 
 118 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 119                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 120   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 121   int ymm_bytes = num_xmm_regs * 16;
 122   int zmm_bytes = num_xmm_regs * 32;
 123 #ifdef COMPILER2
 124   if (save_vectors) {
 125     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 126     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 127     // Save upper half of YMM registers
 128     int vect_bytes = ymm_bytes;
 129     if (UseAVX > 2) {
 130       // Save upper half of ZMM registers as well
 131       vect_bytes += zmm_bytes;
 132     }
 133     additional_frame_words += vect_bytes / wordSize;
 134   }
 135 #else
 136   assert(!save_vectors, "vectors are generated only by C2");
 137 #endif
 138   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 139   int frame_words = frame_size_in_bytes / wordSize;
 140   *total_frame_words = frame_words;
 141 
 142   assert(FPUStateSizeInWords == 27, "update stack layout");
 143 
 144   // save registers, fpu state, and flags
 145   // We assume caller has already has return address slot on the stack
 146   // We push epb twice in this sequence because we want the real rbp,
 147   // to be under the return like a normal enter and we want to use pusha
 148   // We push by hand instead of pusing push
 149   __ enter();
 150   __ pusha();
 151   __ pushf();
 152   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 153   __ push_FPU_state();          // Save FPU state & init
 154 
 155   if (verify_fpu) {
 156     // Some stubs may have non standard FPU control word settings so
 157     // only check and reset the value when it required to be the
 158     // standard value.  The safepoint blob in particular can be used
 159     // in methods which are using the 24 bit control word for
 160     // optimized float math.
 161 
 162 #ifdef ASSERT
 163     // Make sure the control word has the expected value
 164     Label ok;
 165     __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 166     __ jccb(Assembler::equal, ok);
 167     __ stop("corrupted control word detected");
 168     __ bind(ok);
 169 #endif
 170 
 171     // Reset the control word to guard against exceptions being unmasked
 172     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 173     // into the on stack copy and then reload that to make sure that the
 174     // current and future values are correct.
 175     __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
 176   }
 177 
 178   __ frstor(Address(rsp, 0));
 179   if (!verify_fpu) {
 180     // Set the control word so that exceptions are masked for the
 181     // following code.
 182     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
 183   }
 184 
 185   int off = st0_off;
 186   int delta = st1_off - off;
 187 
 188   // Save the FPU registers in de-opt-able form
 189   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 190     __ fstp_d(Address(rsp, off*wordSize));
 191     off += delta;
 192   }
 193 
 194   off = xmm0_off;
 195   delta = xmm1_off - off;
 196   if(UseSSE == 1) {
 197     // Save the XMM state
 198     for (int n = 0; n < num_xmm_regs; n++) {
 199       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 200       off += delta;
 201     }
 202   } else if(UseSSE >= 2) {
 203     // Save whole 128bit (16 bytes) XMM registers
 204     for (int n = 0; n < num_xmm_regs; n++) {
 205       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 206       off += delta;
 207     }
 208   }
 209 
 210   if (save_vectors) {
 211     __ subptr(rsp, ymm_bytes);
 212     // Save upper half of YMM registers
 213     for (int n = 0; n < num_xmm_regs; n++) {
 214       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 215     }
 216     if (UseAVX > 2) {
 217       __ subptr(rsp, zmm_bytes);
 218       // Save upper half of ZMM registers
 219       for (int n = 0; n < num_xmm_regs; n++) {
 220         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 221       }
 222     }
 223   }
 224   __ vzeroupper();
 225 
 226   // Set an oopmap for the call site.  This oopmap will map all
 227   // oop-registers and debug-info registers as callee-saved.  This
 228   // will allow deoptimization at this safepoint to find all possible
 229   // debug-info recordings, as well as let GC find all oops.
 230 
 231   OopMapSet *oop_maps = new OopMapSet();
 232   OopMap* map =  new OopMap( frame_words, 0 );
 233 
 234 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 235 #define NEXTREG(x) (x)->as_VMReg()->next()
 236 
 237   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 238   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 239   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 240   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 241   // rbp, location is known implicitly, no oopMap
 242   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 243   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 244   // %%% This is really a waste but we'll keep things as they were for now for the upper component
 245   off = st0_off;
 246   delta = st1_off - off;
 247   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 248     FloatRegister freg_name = as_FloatRegister(n);
 249     map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
 250     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
 251     off += delta;
 252   }
 253   off = xmm0_off;
 254   delta = xmm1_off - off;
 255   for (int n = 0; n < num_xmm_regs; n++) {
 256     XMMRegister xmm_name = as_XMMRegister(n);
 257     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 258     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 259     off += delta;
 260   }
 261 #undef NEXTREG
 262 #undef STACK_OFFSET
 263 
 264   return map;
 265 }
 266 
 267 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 268   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 269   int ymm_bytes = num_xmm_regs * 16;
 270   int zmm_bytes = num_xmm_regs * 32;
 271   // Recover XMM & FPU state
 272   int additional_frame_bytes = 0;
 273 #ifdef COMPILER2
 274   if (restore_vectors) {
 275     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 276     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 277     // Save upper half of YMM registers
 278     additional_frame_bytes = ymm_bytes;
 279     if (UseAVX > 2) {
 280       // Save upper half of ZMM registers as well
 281       additional_frame_bytes += zmm_bytes;
 282     }
 283   }
 284 #else
 285   assert(!restore_vectors, "vectors are generated only by C2");
 286 #endif
 287 
 288   int off = xmm0_off;
 289   int delta = xmm1_off - off;
 290 
 291   __ vzeroupper();
 292 
 293   if (UseSSE == 1) {
 294     // Restore XMM registers
 295     assert(additional_frame_bytes == 0, "");
 296     for (int n = 0; n < num_xmm_regs; n++) {
 297       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 298       off += delta;
 299     }
 300   } else if (UseSSE >= 2) {
 301     // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
 302     // ZMM because the movdqu instruction zeros the upper part of the XMM register.
 303     for (int n = 0; n < num_xmm_regs; n++) {
 304       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 305       off += delta;
 306     }
 307   }
 308 
 309   if (restore_vectors) {
 310     if (UseAVX > 2) {
 311       // Restore upper half of ZMM registers.
 312       for (int n = 0; n < num_xmm_regs; n++) {
 313         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
 314       }
 315       __ addptr(rsp, zmm_bytes);
 316     }
 317     // Restore upper half of YMM registers.
 318     for (int n = 0; n < num_xmm_regs; n++) {
 319       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
 320     }
 321     __ addptr(rsp, ymm_bytes);
 322   }
 323 
 324   __ pop_FPU_state();
 325   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 326 
 327   __ popf();
 328   __ popa();
 329   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 330   __ pop(rbp);
 331 }
 332 
 333 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 334 
 335   // Just restore result register. Only used by deoptimization. By
 336   // now any callee save register that needs to be restore to a c2
 337   // caller of the deoptee has been extracted into the vframeArray
 338   // and will be stuffed into the c2i adapter we create for later
 339   // restoration so only result registers need to be restored here.
 340   //
 341 
 342   __ frstor(Address(rsp, 0));      // Restore fpu state
 343 
 344   // Recover XMM & FPU state
 345   if( UseSSE == 1 ) {
 346     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 347   } else if( UseSSE >= 2 ) {
 348     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 349   }
 350   __ movptr(rax, Address(rsp, rax_off*wordSize));
 351   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
 352   // Pop all of the register save are off the stack except the return address
 353   __ addptr(rsp, return_off * wordSize);
 354 }
 355 
 356 // Is vector's size (in bytes) bigger than a size saved by default?
 357 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
 358 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
 359 bool SharedRuntime::is_wide_vector(int size) {
 360   return size > 16;
 361 }
 362 
 363 size_t SharedRuntime::trampoline_size() {
 364   return 16;
 365 }
 366 
 367 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
 368   __ jump(RuntimeAddress(destination));
 369 }
 370 
 371 // The java_calling_convention describes stack locations as ideal slots on
 372 // a frame with no abi restrictions. Since we must observe abi restrictions
 373 // (like the placement of the register window) the slots must be biased by
 374 // the following value.
 375 static int reg2offset_in(VMReg r) {
 376   // Account for saved rbp, and return address
 377   // This should really be in_preserve_stack_slots
 378   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 379 }
 380 
 381 static int reg2offset_out(VMReg r) {
 382   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 383 }
 384 
 385 // ---------------------------------------------------------------------------
 386 // Read the array of BasicTypes from a signature, and compute where the
 387 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 388 // quantities.  Values less than SharedInfo::stack0 are registers, those above
 389 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 390 // as framesizes are fixed.
 391 // VMRegImpl::stack0 refers to the first slot 0(sp).
 392 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.  Register
 393 // up to RegisterImpl::number_of_registers) are the 32-bit
 394 // integer registers.
 395 
 396 // Pass first two oop/int args in registers ECX and EDX.
 397 // Pass first two float/double args in registers XMM0 and XMM1.
 398 // Doubles have precedence, so if you pass a mix of floats and doubles
 399 // the doubles will grab the registers before the floats will.
 400 
 401 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 402 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 403 // units regardless of build. Of course for i486 there is no 64 bit build
 404 
 405 
 406 // ---------------------------------------------------------------------------
 407 // The compiled Java calling convention.
 408 // Pass first two oop/int args in registers ECX and EDX.
 409 // Pass first two float/double args in registers XMM0 and XMM1.
 410 // Doubles have precedence, so if you pass a mix of floats and doubles
 411 // the doubles will grab the registers before the floats will.
 412 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 413                                            VMRegPair *regs,
 414                                            int total_args_passed,
 415                                            int is_outgoing) {
 416   uint    stack = 0;          // Starting stack position for args on stack
 417 
 418 
 419   // Pass first two oop/int args in registers ECX and EDX.
 420   uint reg_arg0 = 9999;
 421   uint reg_arg1 = 9999;
 422 
 423   // Pass first two float/double args in registers XMM0 and XMM1.
 424   // Doubles have precedence, so if you pass a mix of floats and doubles
 425   // the doubles will grab the registers before the floats will.
 426   // CNC - TURNED OFF FOR non-SSE.
 427   //       On Intel we have to round all doubles (and most floats) at
 428   //       call sites by storing to the stack in any case.
 429   // UseSSE=0 ==> Don't Use ==> 9999+0
 430   // UseSSE=1 ==> Floats only ==> 9999+1
 431   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 432   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 433   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 434   uint freg_arg0 = 9999+fargs;
 435   uint freg_arg1 = 9999+fargs;
 436 
 437   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 438   int i;
 439   for( i = 0; i < total_args_passed; i++) {
 440     if( sig_bt[i] == T_DOUBLE ) {
 441       // first 2 doubles go in registers
 442       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 443       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 444       else // Else double is passed low on the stack to be aligned.
 445         stack += 2;
 446     } else if( sig_bt[i] == T_LONG ) {
 447       stack += 2;
 448     }
 449   }
 450   int dstack = 0;             // Separate counter for placing doubles
 451 
 452   // Now pick where all else goes.
 453   for( i = 0; i < total_args_passed; i++) {
 454     // From the type and the argument number (count) compute the location
 455     switch( sig_bt[i] ) {
 456     case T_SHORT:
 457     case T_CHAR:
 458     case T_BYTE:
 459     case T_BOOLEAN:
 460     case T_INT:
 461     case T_ARRAY:
 462     case T_OBJECT:
 463     case T_ADDRESS:
 464       if( reg_arg0 == 9999 )  {
 465         reg_arg0 = i;
 466         regs[i].set1(rcx->as_VMReg());
 467       } else if( reg_arg1 == 9999 )  {
 468         reg_arg1 = i;
 469         regs[i].set1(rdx->as_VMReg());
 470       } else {
 471         regs[i].set1(VMRegImpl::stack2reg(stack++));
 472       }
 473       break;
 474     case T_FLOAT:
 475       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 476         freg_arg0 = i;
 477         regs[i].set1(xmm0->as_VMReg());
 478       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 479         freg_arg1 = i;
 480         regs[i].set1(xmm1->as_VMReg());
 481       } else {
 482         regs[i].set1(VMRegImpl::stack2reg(stack++));
 483       }
 484       break;
 485     case T_LONG:
 486       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 487       regs[i].set2(VMRegImpl::stack2reg(dstack));
 488       dstack += 2;
 489       break;
 490     case T_DOUBLE:
 491       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 492       if( freg_arg0 == (uint)i ) {
 493         regs[i].set2(xmm0->as_VMReg());
 494       } else if( freg_arg1 == (uint)i ) {
 495         regs[i].set2(xmm1->as_VMReg());
 496       } else {
 497         regs[i].set2(VMRegImpl::stack2reg(dstack));
 498         dstack += 2;
 499       }
 500       break;
 501     case T_VOID: regs[i].set_bad(); break;
 502       break;
 503     default:
 504       ShouldNotReachHere();
 505       break;
 506     }
 507   }
 508 
 509   // return value can be odd number of VMRegImpl stack slots make multiple of 2
 510   return align_up(stack, 2);
 511 }
 512 
 513 // Patch the callers callsite with entry to compiled code if it exists.
 514 static void patch_callers_callsite(MacroAssembler *masm) {
 515   Label L;
 516   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 517   __ jcc(Assembler::equal, L);
 518   // Schedule the branch target address early.
 519   // Call into the VM to patch the caller, then jump to compiled callee
 520   // rax, isn't live so capture return address while we easily can
 521   __ movptr(rax, Address(rsp, 0));
 522   __ pusha();
 523   __ pushf();
 524 
 525   if (UseSSE == 1) {
 526     __ subptr(rsp, 2*wordSize);
 527     __ movflt(Address(rsp, 0), xmm0);
 528     __ movflt(Address(rsp, wordSize), xmm1);
 529   }
 530   if (UseSSE >= 2) {
 531     __ subptr(rsp, 4*wordSize);
 532     __ movdbl(Address(rsp, 0), xmm0);
 533     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 534   }
 535 #ifdef COMPILER2
 536   // C2 may leave the stack dirty if not in SSE2+ mode
 537   if (UseSSE >= 2) {
 538     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 539   } else {
 540     __ empty_FPU_stack();
 541   }
 542 #endif /* COMPILER2 */
 543 
 544   // VM needs caller's callsite
 545   __ push(rax);
 546   // VM needs target method
 547   __ push(rbx);
 548   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 549   __ addptr(rsp, 2*wordSize);
 550 
 551   if (UseSSE == 1) {
 552     __ movflt(xmm0, Address(rsp, 0));
 553     __ movflt(xmm1, Address(rsp, wordSize));
 554     __ addptr(rsp, 2*wordSize);
 555   }
 556   if (UseSSE >= 2) {
 557     __ movdbl(xmm0, Address(rsp, 0));
 558     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 559     __ addptr(rsp, 4*wordSize);
 560   }
 561 
 562   __ popf();
 563   __ popa();
 564   __ bind(L);
 565 }
 566 
 567 
 568 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 569   int next_off = st_off - Interpreter::stackElementSize;
 570   __ movdbl(Address(rsp, next_off), r);
 571 }
 572 
 573 static void gen_c2i_adapter(MacroAssembler *masm,
 574                             int total_args_passed,
 575                             int comp_args_on_stack,
 576                             const BasicType *sig_bt,
 577                             const VMRegPair *regs,
 578                             Label& skip_fixup) {
 579   // Before we get into the guts of the C2I adapter, see if we should be here
 580   // at all.  We've come from compiled code and are attempting to jump to the
 581   // interpreter, which means the caller made a static call to get here
 582   // (vcalls always get a compiled target if there is one).  Check for a
 583   // compiled target.  If there is one, we need to patch the caller's call.
 584   patch_callers_callsite(masm);
 585 
 586   __ bind(skip_fixup);
 587 
 588 #ifdef COMPILER2
 589   // C2 may leave the stack dirty if not in SSE2+ mode
 590   if (UseSSE >= 2) {
 591     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 592   } else {
 593     __ empty_FPU_stack();
 594   }
 595 #endif /* COMPILER2 */
 596 
 597   // Since all args are passed on the stack, total_args_passed * interpreter_
 598   // stack_element_size  is the
 599   // space we need.
 600   int extraspace = total_args_passed * Interpreter::stackElementSize;
 601 
 602   // Get return address
 603   __ pop(rax);
 604 
 605   // set senderSP value
 606   __ movptr(rsi, rsp);
 607 
 608   __ subptr(rsp, extraspace);
 609 
 610   // Now write the args into the outgoing interpreter space
 611   for (int i = 0; i < total_args_passed; i++) {
 612     if (sig_bt[i] == T_VOID) {
 613       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 614       continue;
 615     }
 616 
 617     // st_off points to lowest address on stack.
 618     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
 619     int next_off = st_off - Interpreter::stackElementSize;
 620 
 621     // Say 4 args:
 622     // i   st_off
 623     // 0   12 T_LONG
 624     // 1    8 T_VOID
 625     // 2    4 T_OBJECT
 626     // 3    0 T_BOOL
 627     VMReg r_1 = regs[i].first();
 628     VMReg r_2 = regs[i].second();
 629     if (!r_1->is_valid()) {
 630       assert(!r_2->is_valid(), "");
 631       continue;
 632     }
 633 
 634     if (r_1->is_stack()) {
 635       // memory to memory use fpu stack top
 636       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 637 
 638       if (!r_2->is_valid()) {
 639         __ movl(rdi, Address(rsp, ld_off));
 640         __ movptr(Address(rsp, st_off), rdi);
 641       } else {
 642 
 643         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 644         // st_off == MSW, st_off-wordSize == LSW
 645 
 646         __ movptr(rdi, Address(rsp, ld_off));
 647         __ movptr(Address(rsp, next_off), rdi);
 648 #ifndef _LP64
 649         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 650         __ movptr(Address(rsp, st_off), rdi);
 651 #else
 652 #ifdef ASSERT
 653         // Overwrite the unused slot with known junk
 654         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 655         __ movptr(Address(rsp, st_off), rax);
 656 #endif /* ASSERT */
 657 #endif // _LP64
 658       }
 659     } else if (r_1->is_Register()) {
 660       Register r = r_1->as_Register();
 661       if (!r_2->is_valid()) {
 662         __ movl(Address(rsp, st_off), r);
 663       } else {
 664         // long/double in gpr
 665         NOT_LP64(ShouldNotReachHere());
 666         // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 667         // T_DOUBLE and T_LONG use two slots in the interpreter
 668         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 669           // long/double in gpr
 670 #ifdef ASSERT
 671           // Overwrite the unused slot with known junk
 672           LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
 673           __ movptr(Address(rsp, st_off), rax);
 674 #endif /* ASSERT */
 675           __ movptr(Address(rsp, next_off), r);
 676         } else {
 677           __ movptr(Address(rsp, st_off), r);
 678         }
 679       }
 680     } else {
 681       assert(r_1->is_XMMRegister(), "");
 682       if (!r_2->is_valid()) {
 683         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 684       } else {
 685         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
 686         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 687       }
 688     }
 689   }
 690 
 691   // Schedule the branch target address early.
 692   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 693   // And repush original return address
 694   __ push(rax);
 695   __ jmp(rcx);
 696 }
 697 
 698 
 699 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 700   int next_val_off = ld_off - Interpreter::stackElementSize;
 701   __ movdbl(r, Address(saved_sp, next_val_off));
 702 }
 703 
 704 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 705                         address code_start, address code_end,
 706                         Label& L_ok) {
 707   Label L_fail;
 708   __ lea(temp_reg, ExternalAddress(code_start));
 709   __ cmpptr(pc_reg, temp_reg);
 710   __ jcc(Assembler::belowEqual, L_fail);
 711   __ lea(temp_reg, ExternalAddress(code_end));
 712   __ cmpptr(pc_reg, temp_reg);
 713   __ jcc(Assembler::below, L_ok);
 714   __ bind(L_fail);
 715 }
 716 
 717 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 718                                     int total_args_passed,
 719                                     int comp_args_on_stack,
 720                                     const BasicType *sig_bt,
 721                                     const VMRegPair *regs) {
 722   // Note: rsi contains the senderSP on entry. We must preserve it since
 723   // we may do a i2c -> c2i transition if we lose a race where compiled
 724   // code goes non-entrant while we get args ready.
 725 
 726   // Adapters can be frameless because they do not require the caller
 727   // to perform additional cleanup work, such as correcting the stack pointer.
 728   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 729   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 730   // even if a callee has modified the stack pointer.
 731   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 732   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 733   // up via the senderSP register).
 734   // In other words, if *either* the caller or callee is interpreted, we can
 735   // get the stack pointer repaired after a call.
 736   // This is why c2i and i2c adapters cannot be indefinitely composed.
 737   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 738   // both caller and callee would be compiled methods, and neither would
 739   // clean up the stack pointer changes performed by the two adapters.
 740   // If this happens, control eventually transfers back to the compiled
 741   // caller, but with an uncorrected stack, causing delayed havoc.
 742 
 743   // Pick up the return address
 744   __ movptr(rax, Address(rsp, 0));
 745 
 746   if (VerifyAdapterCalls &&
 747       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 748     // So, let's test for cascading c2i/i2c adapters right now.
 749     //  assert(Interpreter::contains($return_addr) ||
 750     //         StubRoutines::contains($return_addr),
 751     //         "i2c adapter must return to an interpreter frame");
 752     __ block_comment("verify_i2c { ");
 753     Label L_ok;
 754     if (Interpreter::code() != NULL)
 755       range_check(masm, rax, rdi,
 756                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 757                   L_ok);
 758     if (StubRoutines::code1() != NULL)
 759       range_check(masm, rax, rdi,
 760                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 761                   L_ok);
 762     if (StubRoutines::code2() != NULL)
 763       range_check(masm, rax, rdi,
 764                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 765                   L_ok);
 766     const char* msg = "i2c adapter must return to an interpreter frame";
 767     __ block_comment(msg);
 768     __ stop(msg);
 769     __ bind(L_ok);
 770     __ block_comment("} verify_i2ce ");
 771   }
 772 
 773   // Must preserve original SP for loading incoming arguments because
 774   // we need to align the outgoing SP for compiled code.
 775   __ movptr(rdi, rsp);
 776 
 777   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 778   // in registers, we will occasionally have no stack args.
 779   int comp_words_on_stack = 0;
 780   if (comp_args_on_stack) {
 781     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 782     // registers are below.  By subtracting stack0, we either get a negative
 783     // number (all values in registers) or the maximum stack slot accessed.
 784     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 785     // Convert 4-byte stack slots to words.
 786     comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
 787     // Round up to miminum stack alignment, in wordSize
 788     comp_words_on_stack = align_up(comp_words_on_stack, 2);
 789     __ subptr(rsp, comp_words_on_stack * wordSize);
 790   }
 791 
 792   // Align the outgoing SP
 793   __ andptr(rsp, -(StackAlignmentInBytes));
 794 
 795   // push the return address on the stack (note that pushing, rather
 796   // than storing it, yields the correct frame alignment for the callee)
 797   __ push(rax);
 798 
 799   // Put saved SP in another register
 800   const Register saved_sp = rax;
 801   __ movptr(saved_sp, rdi);
 802 
 803 
 804   // Will jump to the compiled code just as if compiled code was doing it.
 805   // Pre-load the register-jump target early, to schedule it better.
 806   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 807 
 808   // Now generate the shuffle code.  Pick up all register args and move the
 809   // rest through the floating point stack top.
 810   for (int i = 0; i < total_args_passed; i++) {
 811     if (sig_bt[i] == T_VOID) {
 812       // Longs and doubles are passed in native word order, but misaligned
 813       // in the 32-bit build.
 814       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 815       continue;
 816     }
 817 
 818     // Pick up 0, 1 or 2 words from SP+offset.
 819 
 820     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 821             "scrambled load targets?");
 822     // Load in argument order going down.
 823     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
 824     // Point to interpreter value (vs. tag)
 825     int next_off = ld_off - Interpreter::stackElementSize;
 826     //
 827     //
 828     //
 829     VMReg r_1 = regs[i].first();
 830     VMReg r_2 = regs[i].second();
 831     if (!r_1->is_valid()) {
 832       assert(!r_2->is_valid(), "");
 833       continue;
 834     }
 835     if (r_1->is_stack()) {
 836       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 837       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 838 
 839       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
 840       // and if we end up going thru a c2i because of a miss a reasonable value of rsi
 841       // we be generated.
 842       if (!r_2->is_valid()) {
 843         // __ fld_s(Address(saved_sp, ld_off));
 844         // __ fstp_s(Address(rsp, st_off));
 845         __ movl(rsi, Address(saved_sp, ld_off));
 846         __ movptr(Address(rsp, st_off), rsi);
 847       } else {
 848         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 849         // are accessed as negative so LSW is at LOW address
 850 
 851         // ld_off is MSW so get LSW
 852         // st_off is LSW (i.e. reg.first())
 853         // __ fld_d(Address(saved_sp, next_off));
 854         // __ fstp_d(Address(rsp, st_off));
 855         //
 856         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 857         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 858         // So we must adjust where to pick up the data to match the interpreter.
 859         //
 860         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 861         // are accessed as negative so LSW is at LOW address
 862 
 863         // ld_off is MSW so get LSW
 864         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 865                            next_off : ld_off;
 866         __ movptr(rsi, Address(saved_sp, offset));
 867         __ movptr(Address(rsp, st_off), rsi);
 868 #ifndef _LP64
 869         __ movptr(rsi, Address(saved_sp, ld_off));
 870         __ movptr(Address(rsp, st_off + wordSize), rsi);
 871 #endif // _LP64
 872       }
 873     } else if (r_1->is_Register()) {  // Register argument
 874       Register r = r_1->as_Register();
 875       assert(r != rax, "must be different");
 876       if (r_2->is_valid()) {
 877         //
 878         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 879         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 880         // So we must adjust where to pick up the data to match the interpreter.
 881 
 882         const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 883                            next_off : ld_off;
 884 
 885         // this can be a misaligned move
 886         __ movptr(r, Address(saved_sp, offset));
 887 #ifndef _LP64
 888         assert(r_2->as_Register() != rax, "need another temporary register");
 889         // Remember r_1 is low address (and LSB on x86)
 890         // So r_2 gets loaded from high address regardless of the platform
 891         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 892 #endif // _LP64
 893       } else {
 894         __ movl(r, Address(saved_sp, ld_off));
 895       }
 896     } else {
 897       assert(r_1->is_XMMRegister(), "");
 898       if (!r_2->is_valid()) {
 899         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 900       } else {
 901         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 902       }
 903     }
 904   }
 905 
 906   // 6243940 We might end up in handle_wrong_method if
 907   // the callee is deoptimized as we race thru here. If that
 908   // happens we don't want to take a safepoint because the
 909   // caller frame will look interpreted and arguments are now
 910   // "compiled" so it is much better to make this transition
 911   // invisible to the stack walking code. Unfortunately if
 912   // we try and find the callee by normal means a safepoint
 913   // is possible. So we stash the desired callee in the thread
 914   // and the vm will find there should this case occur.
 915 
 916   __ get_thread(rax);
 917   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 918 
 919   // move Method* to rax, in case we end up in an c2i adapter.
 920   // the c2i adapters expect Method* in rax, (c2) because c2's
 921   // resolve stubs return the result (the method) in rax,.
 922   // I'd love to fix this.
 923   __ mov(rax, rbx);
 924 
 925   __ jmp(rdi);
 926 }
 927 
 928 // ---------------------------------------------------------------
 929 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 930                                                             int total_args_passed,
 931                                                             int comp_args_on_stack,
 932                                                             const BasicType *sig_bt,
 933                                                             const VMRegPair *regs,
 934                                                             AdapterFingerPrint* fingerprint) {
 935   address i2c_entry = __ pc();
 936 
 937   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 938 
 939   // -------------------------------------------------------------------------
 940   // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
 941   // to the interpreter.  The args start out packed in the compiled layout.  They
 942   // need to be unpacked into the interpreter layout.  This will almost always
 943   // require some stack space.  We grow the current (compiled) stack, then repack
 944   // the args.  We  finally end in a jump to the generic interpreter entry point.
 945   // On exit from the interpreter, the interpreter will restore our SP (lest the
 946   // compiled code, which relys solely on SP and not EBP, get sick).
 947 
 948   address c2i_unverified_entry = __ pc();
 949   Label skip_fixup;
 950 
 951   Register holder = rax;
 952   Register receiver = rcx;
 953   Register temp = rbx;
 954 
 955   {
 956 
 957     Label missed;
 958     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 959     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 960     __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
 961     __ jcc(Assembler::notEqual, missed);
 962     // Method might have been compiled since the call site was patched to
 963     // interpreted if that is the case treat it as a miss so we can get
 964     // the call site corrected.
 965     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 966     __ jcc(Assembler::equal, skip_fixup);
 967 
 968     __ bind(missed);
 969     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 970   }
 971 
 972   address c2i_entry = __ pc();
 973 
 974   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 975 
 976   __ flush();
 977   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 978 }
 979 
 980 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 981                                          VMRegPair *regs,
 982                                          VMRegPair *regs2,
 983                                          int total_args_passed) {
 984   assert(regs2 == NULL, "not needed on x86");
 985 // We return the amount of VMRegImpl stack slots we need to reserve for all
 986 // the arguments NOT counting out_preserve_stack_slots.
 987 
 988   uint    stack = 0;        // All arguments on stack
 989 
 990   for( int i = 0; i < total_args_passed; i++) {
 991     // From the type and the argument number (count) compute the location
 992     switch( sig_bt[i] ) {
 993     case T_BOOLEAN:
 994     case T_CHAR:
 995     case T_FLOAT:
 996     case T_BYTE:
 997     case T_SHORT:
 998     case T_INT:
 999     case T_OBJECT:
1000     case T_ARRAY:
1001     case T_ADDRESS:
1002     case T_METADATA:
1003       regs[i].set1(VMRegImpl::stack2reg(stack++));
1004       break;
1005     case T_LONG:
1006     case T_DOUBLE: // The stack numbering is reversed from Java
1007       // Since C arguments do not get reversed, the ordering for
1008       // doubles on the stack must be opposite the Java convention
1009       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
1010       regs[i].set2(VMRegImpl::stack2reg(stack));
1011       stack += 2;
1012       break;
1013     case T_VOID: regs[i].set_bad(); break;
1014     default:
1015       ShouldNotReachHere();
1016       break;
1017     }
1018   }
1019   return stack;
1020 }
1021 
1022 // A simple move of integer like type
1023 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1024   if (src.first()->is_stack()) {
1025     if (dst.first()->is_stack()) {
1026       // stack to stack
1027       // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1028       // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1029       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1030       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1031     } else {
1032       // stack to reg
1033       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1034     }
1035   } else if (dst.first()->is_stack()) {
1036     // reg to stack
1037     // no need to sign extend on 64bit
1038     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1039   } else {
1040     if (dst.first() != src.first()) {
1041       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1042     }
1043   }
1044 }
1045 
1046 // An oop arg. Must pass a handle not the oop itself
1047 static void object_move(MacroAssembler* masm,
1048                         OopMap* map,
1049                         int oop_handle_offset,
1050                         int framesize_in_slots,
1051                         VMRegPair src,
1052                         VMRegPair dst,
1053                         bool is_receiver,
1054                         int* receiver_offset) {
1055 
1056   // Because of the calling conventions we know that src can be a
1057   // register or a stack location. dst can only be a stack location.
1058 
1059   assert(dst.first()->is_stack(), "must be stack");
1060   // must pass a handle. First figure out the location we use as a handle
1061 
1062   if (src.first()->is_stack()) {
1063     // Oop is already on the stack as an argument
1064     Register rHandle = rax;
1065     Label nil;
1066     __ xorptr(rHandle, rHandle);
1067     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1068     __ jcc(Assembler::equal, nil);
1069     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1070     __ bind(nil);
1071     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1072 
1073     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1074     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1075     if (is_receiver) {
1076       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1077     }
1078   } else {
1079     // Oop is in an a register we must store it to the space we reserve
1080     // on the stack for oop_handles
1081     const Register rOop = src.first()->as_Register();
1082     const Register rHandle = rax;
1083     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1084     int offset = oop_slot*VMRegImpl::stack_slot_size;
1085     Label skip;
1086     __ movptr(Address(rsp, offset), rOop);
1087     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1088     __ xorptr(rHandle, rHandle);
1089     __ cmpptr(rOop, (int32_t)NULL_WORD);
1090     __ jcc(Assembler::equal, skip);
1091     __ lea(rHandle, Address(rsp, offset));
1092     __ bind(skip);
1093     // Store the handle parameter
1094     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1095     if (is_receiver) {
1096       *receiver_offset = offset;
1097     }
1098   }
1099 }
1100 
1101 // A float arg may have to do float reg int reg conversion
1102 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1103   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1104 
1105   // Because of the calling convention we know that src is either a stack location
1106   // or an xmm register. dst can only be a stack location.
1107 
1108   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1109 
1110   if (src.first()->is_stack()) {
1111     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1112     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1113   } else {
1114     // reg to stack
1115     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1116   }
1117 }
1118 
1119 // A long move
1120 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1121 
1122   // The only legal possibility for a long_move VMRegPair is:
1123   // 1: two stack slots (possibly unaligned)
1124   // as neither the java  or C calling convention will use registers
1125   // for longs.
1126 
1127   if (src.first()->is_stack() && dst.first()->is_stack()) {
1128     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1129     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1130     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1131     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1132     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1133   } else {
1134     ShouldNotReachHere();
1135   }
1136 }
1137 
1138 // A double move
1139 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1140 
1141   // The only legal possibilities for a double_move VMRegPair are:
1142   // The painful thing here is that like long_move a VMRegPair might be
1143 
1144   // Because of the calling convention we know that src is either
1145   //   1: a single physical register (xmm registers only)
1146   //   2: two stack slots (possibly unaligned)
1147   // dst can only be a pair of stack slots.
1148 
1149   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1150 
1151   if (src.first()->is_stack()) {
1152     // source is all stack
1153     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1154     NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1155     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1156     NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1157   } else {
1158     // reg to stack
1159     // No worries about stack alignment
1160     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1161   }
1162 }
1163 
1164 
1165 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1166   // We always ignore the frame_slots arg and just use the space just below frame pointer
1167   // which by this time is free to use
1168   switch (ret_type) {
1169   case T_FLOAT:
1170     __ fstp_s(Address(rbp, -wordSize));
1171     break;
1172   case T_DOUBLE:
1173     __ fstp_d(Address(rbp, -2*wordSize));
1174     break;
1175   case T_VOID:  break;
1176   case T_LONG:
1177     __ movptr(Address(rbp, -wordSize), rax);
1178     NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
1179     break;
1180   default: {
1181     __ movptr(Address(rbp, -wordSize), rax);
1182     }
1183   }
1184 }
1185 
1186 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1187   // We always ignore the frame_slots arg and just use the space just below frame pointer
1188   // which by this time is free to use
1189   switch (ret_type) {
1190   case T_FLOAT:
1191     __ fld_s(Address(rbp, -wordSize));
1192     break;
1193   case T_DOUBLE:
1194     __ fld_d(Address(rbp, -2*wordSize));
1195     break;
1196   case T_LONG:
1197     __ movptr(rax, Address(rbp, -wordSize));
1198     NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
1199     break;
1200   case T_VOID:  break;
1201   default: {
1202     __ movptr(rax, Address(rbp, -wordSize));
1203     }
1204   }
1205 }
1206 
1207 
1208 static void save_or_restore_arguments(MacroAssembler* masm,
1209                                       const int stack_slots,
1210                                       const int total_in_args,
1211                                       const int arg_save_area,
1212                                       OopMap* map,
1213                                       VMRegPair* in_regs,
1214                                       BasicType* in_sig_bt) {
1215   // if map is non-NULL then the code should store the values,
1216   // otherwise it should load them.
1217   int handle_index = 0;
1218   // Save down double word first
1219   for ( int i = 0; i < total_in_args; i++) {
1220     if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1221       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1222       int offset = slot * VMRegImpl::stack_slot_size;
1223       handle_index += 2;
1224       assert(handle_index <= stack_slots, "overflow");
1225       if (map != NULL) {
1226         __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1227       } else {
1228         __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1229       }
1230     }
1231     if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
1232       int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1233       int offset = slot * VMRegImpl::stack_slot_size;
1234       handle_index += 2;
1235       assert(handle_index <= stack_slots, "overflow");
1236       if (map != NULL) {
1237         __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
1238         if (in_regs[i].second()->is_Register()) {
1239           __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
1240         }
1241       } else {
1242         __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
1243         if (in_regs[i].second()->is_Register()) {
1244           __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
1245         }
1246       }
1247     }
1248   }
1249   // Save or restore single word registers
1250   for ( int i = 0; i < total_in_args; i++) {
1251     if (in_regs[i].first()->is_Register()) {
1252       int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1253       int offset = slot * VMRegImpl::stack_slot_size;
1254       assert(handle_index <= stack_slots, "overflow");
1255       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1256         map->set_oop(VMRegImpl::stack2reg(slot));;
1257       }
1258 
1259       // Value is in an input register pass we must flush it to the stack
1260       const Register reg = in_regs[i].first()->as_Register();
1261       switch (in_sig_bt[i]) {
1262         case T_ARRAY:
1263           if (map != NULL) {
1264             __ movptr(Address(rsp, offset), reg);
1265           } else {
1266             __ movptr(reg, Address(rsp, offset));
1267           }
1268           break;
1269         case T_BOOLEAN:
1270         case T_CHAR:
1271         case T_BYTE:
1272         case T_SHORT:
1273         case T_INT:
1274           if (map != NULL) {
1275             __ movl(Address(rsp, offset), reg);
1276           } else {
1277             __ movl(reg, Address(rsp, offset));
1278           }
1279           break;
1280         case T_OBJECT:
1281         default: ShouldNotReachHere();
1282       }
1283     } else if (in_regs[i].first()->is_XMMRegister()) {
1284       if (in_sig_bt[i] == T_FLOAT) {
1285         int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1286         int offset = slot * VMRegImpl::stack_slot_size;
1287         assert(handle_index <= stack_slots, "overflow");
1288         if (map != NULL) {
1289           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1290         } else {
1291           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1292         }
1293       }
1294     } else if (in_regs[i].first()->is_stack()) {
1295       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1296         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1297         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1298       }
1299     }
1300   }
1301 }
1302 
1303 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1304 // keeps a new JNI critical region from starting until a GC has been
1305 // forced.  Save down any oops in registers and describe them in an
1306 // OopMap.
1307 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1308                                                Register thread,
1309                                                int stack_slots,
1310                                                int total_c_args,
1311                                                int total_in_args,
1312                                                int arg_save_area,
1313                                                OopMapSet* oop_maps,
1314                                                VMRegPair* in_regs,
1315                                                BasicType* in_sig_bt) {
1316   __ block_comment("check GCLocker::needs_gc");
1317   Label cont;
1318   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1319   __ jcc(Assembler::equal, cont);
1320 
1321   // Save down any incoming oops and call into the runtime to halt for a GC
1322 
1323   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1324 
1325   save_or_restore_arguments(masm, stack_slots, total_in_args,
1326                             arg_save_area, map, in_regs, in_sig_bt);
1327 
1328   address the_pc = __ pc();
1329   oop_maps->add_gc_map( __ offset(), map);
1330   __ set_last_Java_frame(thread, rsp, noreg, the_pc);
1331 
1332   __ block_comment("block_for_jni_critical");
1333   __ push(thread);
1334   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1335   __ increment(rsp, wordSize);
1336 
1337   __ get_thread(thread);
1338   __ reset_last_Java_frame(thread, false);
1339 
1340   save_or_restore_arguments(masm, stack_slots, total_in_args,
1341                             arg_save_area, NULL, in_regs, in_sig_bt);
1342 
1343   __ bind(cont);
1344 #ifdef ASSERT
1345   if (StressCriticalJNINatives) {
1346     // Stress register saving
1347     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1348     save_or_restore_arguments(masm, stack_slots, total_in_args,
1349                               arg_save_area, map, in_regs, in_sig_bt);
1350     // Destroy argument registers
1351     for (int i = 0; i < total_in_args - 1; i++) {
1352       if (in_regs[i].first()->is_Register()) {
1353         const Register reg = in_regs[i].first()->as_Register();
1354         __ xorptr(reg, reg);
1355       } else if (in_regs[i].first()->is_XMMRegister()) {
1356         __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1357       } else if (in_regs[i].first()->is_FloatRegister()) {
1358         ShouldNotReachHere();
1359       } else if (in_regs[i].first()->is_stack()) {
1360         // Nothing to do
1361       } else {
1362         ShouldNotReachHere();
1363       }
1364       if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1365         i++;
1366       }
1367     }
1368 
1369     save_or_restore_arguments(masm, stack_slots, total_in_args,
1370                               arg_save_area, NULL, in_regs, in_sig_bt);
1371   }
1372 #endif
1373 }
1374 
1375 // Unpack an array argument into a pointer to the body and the length
1376 // if the array is non-null, otherwise pass 0 for both.
1377 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1378   Register tmp_reg = rax;
1379   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1380          "possible collision");
1381   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1382          "possible collision");
1383 
1384   // Pass the length, ptr pair
1385   Label is_null, done;
1386   VMRegPair tmp(tmp_reg->as_VMReg());
1387   if (reg.first()->is_stack()) {
1388     // Load the arg up from the stack
1389     simple_move32(masm, reg, tmp);
1390     reg = tmp;
1391   }
1392   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1393   __ jccb(Assembler::equal, is_null);
1394   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1395   simple_move32(masm, tmp, body_arg);
1396   // load the length relative to the body.
1397   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1398                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1399   simple_move32(masm, tmp, length_arg);
1400   __ jmpb(done);
1401   __ bind(is_null);
1402   // Pass zeros
1403   __ xorptr(tmp_reg, tmp_reg);
1404   simple_move32(masm, tmp, body_arg);
1405   simple_move32(masm, tmp, length_arg);
1406   __ bind(done);
1407 }
1408 
1409 static void verify_oop_args(MacroAssembler* masm,
1410                             const methodHandle& method,
1411                             const BasicType* sig_bt,
1412                             const VMRegPair* regs) {
1413   Register temp_reg = rbx;  // not part of any compiled calling seq
1414   if (VerifyOops) {
1415     for (int i = 0; i < method->size_of_parameters(); i++) {
1416       if (sig_bt[i] == T_OBJECT ||
1417           sig_bt[i] == T_ARRAY) {
1418         VMReg r = regs[i].first();
1419         assert(r->is_valid(), "bad oop arg");
1420         if (r->is_stack()) {
1421           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1422           __ verify_oop(temp_reg);
1423         } else {
1424           __ verify_oop(r->as_Register());
1425         }
1426       }
1427     }
1428   }
1429 }
1430 
1431 static void gen_special_dispatch(MacroAssembler* masm,
1432                                  const methodHandle& method,
1433                                  const BasicType* sig_bt,
1434                                  const VMRegPair* regs) {
1435   verify_oop_args(masm, method, sig_bt, regs);
1436   vmIntrinsics::ID iid = method->intrinsic_id();
1437 
1438   // Now write the args into the outgoing interpreter space
1439   bool     has_receiver   = false;
1440   Register receiver_reg   = noreg;
1441   int      member_arg_pos = -1;
1442   Register member_reg     = noreg;
1443   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1444   if (ref_kind != 0) {
1445     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1446     member_reg = rbx;  // known to be free at this point
1447     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1448   } else if (iid == vmIntrinsics::_invokeBasic) {
1449     has_receiver = true;
1450   } else {
1451     fatal("unexpected intrinsic id %d", iid);
1452   }
1453 
1454   if (member_reg != noreg) {
1455     // Load the member_arg into register, if necessary.
1456     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1457     VMReg r = regs[member_arg_pos].first();
1458     if (r->is_stack()) {
1459       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1460     } else {
1461       // no data motion is needed
1462       member_reg = r->as_Register();
1463     }
1464   }
1465 
1466   if (has_receiver) {
1467     // Make sure the receiver is loaded into a register.
1468     assert(method->size_of_parameters() > 0, "oob");
1469     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1470     VMReg r = regs[0].first();
1471     assert(r->is_valid(), "bad receiver arg");
1472     if (r->is_stack()) {
1473       // Porting note:  This assumes that compiled calling conventions always
1474       // pass the receiver oop in a register.  If this is not true on some
1475       // platform, pick a temp and load the receiver from stack.
1476       fatal("receiver always in a register");
1477       receiver_reg = rcx;  // known to be free at this point
1478       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1479     } else {
1480       // no data motion is needed
1481       receiver_reg = r->as_Register();
1482     }
1483   }
1484 
1485   // Figure out which address we are really jumping to:
1486   MethodHandles::generate_method_handle_dispatch(masm, iid,
1487                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1488 }
1489 
1490 // ---------------------------------------------------------------------------
1491 // Generate a native wrapper for a given method.  The method takes arguments
1492 // in the Java compiled code convention, marshals them to the native
1493 // convention (handlizes oops, etc), transitions to native, makes the call,
1494 // returns to java state (possibly blocking), unhandlizes any result and
1495 // returns.
1496 //
1497 // Critical native functions are a shorthand for the use of
1498 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1499 // functions.  The wrapper is expected to unpack the arguments before
1500 // passing them to the callee and perform checks before and after the
1501 // native call to ensure that they GCLocker
1502 // lock_critical/unlock_critical semantics are followed.  Some other
1503 // parts of JNI setup are skipped like the tear down of the JNI handle
1504 // block and the check for pending exceptions it's impossible for them
1505 // to be thrown.
1506 //
1507 // They are roughly structured like this:
1508 //    if (GCLocker::needs_gc())
1509 //      SharedRuntime::block_for_jni_critical();
1510 //    tranistion to thread_in_native
1511 //    unpack arrray arguments and call native entry point
1512 //    check for safepoint in progress
1513 //    check if any thread suspend flags are set
1514 //      call into JVM and possible unlock the JNI critical
1515 //      if a GC was suppressed while in the critical native.
1516 //    transition back to thread_in_Java
1517 //    return to caller
1518 //
1519 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1520                                                 const methodHandle& method,
1521                                                 int compile_id,
1522                                                 BasicType* in_sig_bt,
1523                                                 VMRegPair* in_regs,
1524                                                 BasicType ret_type) {
1525   if (method->is_method_handle_intrinsic()) {
1526     vmIntrinsics::ID iid = method->intrinsic_id();
1527     intptr_t start = (intptr_t)__ pc();
1528     int vep_offset = ((intptr_t)__ pc()) - start;
1529     gen_special_dispatch(masm,
1530                          method,
1531                          in_sig_bt,
1532                          in_regs);
1533     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1534     __ flush();
1535     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1536     return nmethod::new_native_nmethod(method,
1537                                        compile_id,
1538                                        masm->code(),
1539                                        vep_offset,
1540                                        frame_complete,
1541                                        stack_slots / VMRegImpl::slots_per_word,
1542                                        in_ByteSize(-1),
1543                                        in_ByteSize(-1),
1544                                        (OopMapSet*)NULL);
1545   }
1546   bool is_critical_native = true;
1547   address native_func = method->critical_native_function();
1548   if (native_func == NULL) {
1549     native_func = method->native_function();
1550     is_critical_native = false;
1551   }
1552   assert(native_func != NULL, "must have function");
1553 
1554   // An OopMap for lock (and class if static)
1555   OopMapSet *oop_maps = new OopMapSet();
1556 
1557   // We have received a description of where all the java arg are located
1558   // on entry to the wrapper. We need to convert these args to where
1559   // the jni function will expect them. To figure out where they go
1560   // we convert the java signature to a C signature by inserting
1561   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1562 
1563   const int total_in_args = method->size_of_parameters();
1564   int total_c_args = total_in_args;
1565   if (!is_critical_native) {
1566     total_c_args += 1;
1567     if (method->is_static()) {
1568       total_c_args++;
1569     }
1570   } else {
1571     for (int i = 0; i < total_in_args; i++) {
1572       if (in_sig_bt[i] == T_ARRAY) {
1573         total_c_args++;
1574       }
1575     }
1576   }
1577 
1578   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1579   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1580   BasicType* in_elem_bt = NULL;
1581 
1582   int argc = 0;
1583   if (!is_critical_native) {
1584     out_sig_bt[argc++] = T_ADDRESS;
1585     if (method->is_static()) {
1586       out_sig_bt[argc++] = T_OBJECT;
1587     }
1588 
1589     for (int i = 0; i < total_in_args ; i++ ) {
1590       out_sig_bt[argc++] = in_sig_bt[i];
1591     }
1592   } else {
1593     Thread* THREAD = Thread::current();
1594     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1595     SignatureStream ss(method->signature());
1596     for (int i = 0; i < total_in_args ; i++ ) {
1597       if (in_sig_bt[i] == T_ARRAY) {
1598         // Arrays are passed as int, elem* pair
1599         out_sig_bt[argc++] = T_INT;
1600         out_sig_bt[argc++] = T_ADDRESS;
1601         Symbol* atype = ss.as_symbol(CHECK_NULL);
1602         const char* at = atype->as_C_string();
1603         if (strlen(at) == 2) {
1604           assert(at[0] == '[', "must be");
1605           switch (at[1]) {
1606             case 'B': in_elem_bt[i]  = T_BYTE; break;
1607             case 'C': in_elem_bt[i]  = T_CHAR; break;
1608             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1609             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1610             case 'I': in_elem_bt[i]  = T_INT; break;
1611             case 'J': in_elem_bt[i]  = T_LONG; break;
1612             case 'S': in_elem_bt[i]  = T_SHORT; break;
1613             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1614             default: ShouldNotReachHere();
1615           }
1616         }
1617       } else {
1618         out_sig_bt[argc++] = in_sig_bt[i];
1619         in_elem_bt[i] = T_VOID;
1620       }
1621       if (in_sig_bt[i] != T_VOID) {
1622         assert(in_sig_bt[i] == ss.type(), "must match");
1623         ss.next();
1624       }
1625     }
1626   }
1627 
1628   // Now figure out where the args must be stored and how much stack space
1629   // they require.
1630   int out_arg_slots;
1631   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1632 
1633   // Compute framesize for the wrapper.  We need to handlize all oops in
1634   // registers a max of 2 on x86.
1635 
1636   // Calculate the total number of stack slots we will need.
1637 
1638   // First count the abi requirement plus all of the outgoing args
1639   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1640 
1641   // Now the space for the inbound oop handle area
1642   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1643   if (is_critical_native) {
1644     // Critical natives may have to call out so they need a save area
1645     // for register arguments.
1646     int double_slots = 0;
1647     int single_slots = 0;
1648     for ( int i = 0; i < total_in_args; i++) {
1649       if (in_regs[i].first()->is_Register()) {
1650         const Register reg = in_regs[i].first()->as_Register();
1651         switch (in_sig_bt[i]) {
1652           case T_ARRAY:  // critical array (uses 2 slots on LP64)
1653           case T_BOOLEAN:
1654           case T_BYTE:
1655           case T_SHORT:
1656           case T_CHAR:
1657           case T_INT:  single_slots++; break;
1658           case T_LONG: double_slots++; break;
1659           default:  ShouldNotReachHere();
1660         }
1661       } else if (in_regs[i].first()->is_XMMRegister()) {
1662         switch (in_sig_bt[i]) {
1663           case T_FLOAT:  single_slots++; break;
1664           case T_DOUBLE: double_slots++; break;
1665           default:  ShouldNotReachHere();
1666         }
1667       } else if (in_regs[i].first()->is_FloatRegister()) {
1668         ShouldNotReachHere();
1669       }
1670     }
1671     total_save_slots = double_slots * 2 + single_slots;
1672     // align the save area
1673     if (double_slots != 0) {
1674       stack_slots = align_up(stack_slots, 2);
1675     }
1676   }
1677 
1678   int oop_handle_offset = stack_slots;
1679   stack_slots += total_save_slots;
1680 
1681   // Now any space we need for handlizing a klass if static method
1682 
1683   int klass_slot_offset = 0;
1684   int klass_offset = -1;
1685   int lock_slot_offset = 0;
1686   bool is_static = false;
1687 
1688   if (method->is_static()) {
1689     klass_slot_offset = stack_slots;
1690     stack_slots += VMRegImpl::slots_per_word;
1691     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1692     is_static = true;
1693   }
1694 
1695   // Plus a lock if needed
1696 
1697   if (method->is_synchronized()) {
1698     lock_slot_offset = stack_slots;
1699     stack_slots += VMRegImpl::slots_per_word;
1700   }
1701 
1702   // Now a place (+2) to save return values or temp during shuffling
1703   // + 2 for return address (which we own) and saved rbp,
1704   stack_slots += 4;
1705 
1706   // Ok The space we have allocated will look like:
1707   //
1708   //
1709   // FP-> |                     |
1710   //      |---------------------|
1711   //      | 2 slots for moves   |
1712   //      |---------------------|
1713   //      | lock box (if sync)  |
1714   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1715   //      | klass (if static)   |
1716   //      |---------------------| <- klass_slot_offset
1717   //      | oopHandle area      |
1718   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1719   //      | outbound memory     |
1720   //      | based arguments     |
1721   //      |                     |
1722   //      |---------------------|
1723   //      |                     |
1724   // SP-> | out_preserved_slots |
1725   //
1726   //
1727   // ****************************************************************************
1728   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1729   // arguments off of the stack after the jni call. Before the call we can use
1730   // instructions that are SP relative. After the jni call we switch to FP
1731   // relative instructions instead of re-adjusting the stack on windows.
1732   // ****************************************************************************
1733 
1734 
1735   // Now compute actual number of stack words we need rounding to make
1736   // stack properly aligned.
1737   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1738 
1739   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1740 
1741   intptr_t start = (intptr_t)__ pc();
1742 
1743   // First thing make an ic check to see if we should even be here
1744 
1745   // We are free to use all registers as temps without saving them and
1746   // restoring them except rbp. rbp is the only callee save register
1747   // as far as the interpreter and the compiler(s) are concerned.
1748 
1749 
1750   const Register ic_reg = rax;
1751   const Register receiver = rcx;
1752   Label hit;
1753   Label exception_pending;
1754 
1755   __ verify_oop(receiver);
1756   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1757   __ jcc(Assembler::equal, hit);
1758 
1759   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1760 
1761   // verified entry must be aligned for code patching.
1762   // and the first 5 bytes must be in the same cache line
1763   // if we align at 8 then we will be sure 5 bytes are in the same line
1764   __ align(8);
1765 
1766   __ bind(hit);
1767 
1768   int vep_offset = ((intptr_t)__ pc()) - start;
1769 
1770 #ifdef COMPILER1
1771   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1772   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1773     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1774    }
1775 #endif // COMPILER1
1776 
1777   // The instruction at the verified entry point must be 5 bytes or longer
1778   // because it can be patched on the fly by make_non_entrant. The stack bang
1779   // instruction fits that requirement.
1780 
1781   // Generate stack overflow check
1782 
1783   if (UseStackBanging) {
1784     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
1785   } else {
1786     // need a 5 byte instruction to allow MT safe patching to non-entrant
1787     __ fat_nop();
1788   }
1789 
1790   // Generate a new frame for the wrapper.
1791   __ enter();
1792   // -2 because return address is already present and so is saved rbp
1793   __ subptr(rsp, stack_size - 2*wordSize);
1794 
1795   // Frame is now completed as far as size and linkage.
1796   int frame_complete = ((intptr_t)__ pc()) - start;
1797 
1798   if (UseRTMLocking) {
1799     // Abort RTM transaction before calling JNI
1800     // because critical section will be large and will be
1801     // aborted anyway. Also nmethod could be deoptimized.
1802     __ xabort(0);
1803   }
1804 
1805   // Calculate the difference between rsp and rbp,. We need to know it
1806   // after the native call because on windows Java Natives will pop
1807   // the arguments and it is painful to do rsp relative addressing
1808   // in a platform independent way. So after the call we switch to
1809   // rbp, relative addressing.
1810 
1811   int fp_adjustment = stack_size - 2*wordSize;
1812 
1813 #ifdef COMPILER2
1814   // C2 may leave the stack dirty if not in SSE2+ mode
1815   if (UseSSE >= 2) {
1816     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1817   } else {
1818     __ empty_FPU_stack();
1819   }
1820 #endif /* COMPILER2 */
1821 
1822   // Compute the rbp, offset for any slots used after the jni call
1823 
1824   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1825 
1826   // We use rdi as a thread pointer because it is callee save and
1827   // if we load it once it is usable thru the entire wrapper
1828   const Register thread = rdi;
1829 
1830   // We use rsi as the oop handle for the receiver/klass
1831   // It is callee save so it survives the call to native
1832 
1833   const Register oop_handle_reg = rsi;
1834 
1835   __ get_thread(thread);
1836 
1837   if (is_critical_native) {
1838     check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1839                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1840   }
1841 
1842   //
1843   // We immediately shuffle the arguments so that any vm call we have to
1844   // make from here on out (sync slow path, jvmti, etc.) we will have
1845   // captured the oops from our caller and have a valid oopMap for
1846   // them.
1847 
1848   // -----------------
1849   // The Grand Shuffle
1850   //
1851   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1852   // and, if static, the class mirror instead of a receiver.  This pretty much
1853   // guarantees that register layout will not match (and x86 doesn't use reg
1854   // parms though amd does).  Since the native abi doesn't use register args
1855   // and the java conventions does we don't have to worry about collisions.
1856   // All of our moved are reg->stack or stack->stack.
1857   // We ignore the extra arguments during the shuffle and handle them at the
1858   // last moment. The shuffle is described by the two calling convention
1859   // vectors we have in our possession. We simply walk the java vector to
1860   // get the source locations and the c vector to get the destinations.
1861 
1862   int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
1863 
1864   // Record rsp-based slot for receiver on stack for non-static methods
1865   int receiver_offset = -1;
1866 
1867   // This is a trick. We double the stack slots so we can claim
1868   // the oops in the caller's frame. Since we are sure to have
1869   // more args than the caller doubling is enough to make
1870   // sure we can capture all the incoming oop args from the
1871   // caller.
1872   //
1873   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1874 
1875   // Mark location of rbp,
1876   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1877 
1878   // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1879   // Are free to temporaries if we have to do  stack to steck moves.
1880   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1881 
1882   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1883     switch (in_sig_bt[i]) {
1884       case T_ARRAY:
1885         if (is_critical_native) {
1886           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1887           c_arg++;
1888           break;
1889         }
1890       case T_OBJECT:
1891         assert(!is_critical_native, "no oop arguments");
1892         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1893                     ((i == 0) && (!is_static)),
1894                     &receiver_offset);
1895         break;
1896       case T_VOID:
1897         break;
1898 
1899       case T_FLOAT:
1900         float_move(masm, in_regs[i], out_regs[c_arg]);
1901           break;
1902 
1903       case T_DOUBLE:
1904         assert( i + 1 < total_in_args &&
1905                 in_sig_bt[i + 1] == T_VOID &&
1906                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1907         double_move(masm, in_regs[i], out_regs[c_arg]);
1908         break;
1909 
1910       case T_LONG :
1911         long_move(masm, in_regs[i], out_regs[c_arg]);
1912         break;
1913 
1914       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1915 
1916       default:
1917         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1918     }
1919   }
1920 
1921   // Pre-load a static method's oop into rsi.  Used both by locking code and
1922   // the normal JNI call code.
1923   if (method->is_static() && !is_critical_native) {
1924 
1925     //  load opp into a register
1926     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1927 
1928     // Now handlize the static class mirror it's known not-null.
1929     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1930     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1931 
1932     // Now get the handle
1933     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1934     // store the klass handle as second argument
1935     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1936   }
1937 
1938   // Change state to native (we save the return address in the thread, since it might not
1939   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
1940   // points into the right code segment. It does not have to be the correct return pc.
1941   // We use the same pc/oopMap repeatedly when we call out
1942 
1943   intptr_t the_pc = (intptr_t) __ pc();
1944   oop_maps->add_gc_map(the_pc - start, map);
1945 
1946   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1947 
1948 
1949   // We have all of the arguments setup at this point. We must not touch any register
1950   // argument registers at this point (what if we save/restore them there are no oop?
1951 
1952   {
1953     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1954     __ mov_metadata(rax, method());
1955     __ call_VM_leaf(
1956          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1957          thread, rax);
1958   }
1959 
1960   // RedefineClasses() tracing support for obsolete method entry
1961   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1962     __ mov_metadata(rax, method());
1963     __ call_VM_leaf(
1964          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1965          thread, rax);
1966   }
1967 
1968   // These are register definitions we need for locking/unlocking
1969   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1970   const Register obj_reg  = rcx;  // Will contain the oop
1971   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1972 
1973   Label slow_path_lock;
1974   Label lock_done;
1975 
1976   // Lock a synchronized method
1977   if (method->is_synchronized()) {
1978     assert(!is_critical_native, "unhandled");
1979 
1980 
1981     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1982 
1983     // Get the handle (the 2nd argument)
1984     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1985 
1986     // Get address of the box
1987 
1988     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1989 
1990     // Load the oop from the handle
1991     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1992 
1993     if (UseBiasedLocking) {
1994       // Note that oop_handle_reg is trashed during this call
1995       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
1996     }
1997 
1998     // Load immediate 1 into swap_reg %rax,
1999     __ movptr(swap_reg, 1);
2000 
2001     // Load (object->mark() | 1) into swap_reg %rax,
2002     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2003 
2004     // Save (object->mark() | 1) into BasicLock's displaced header
2005     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2006 
2007     if (os::is_MP()) {
2008       __ lock();
2009     }
2010 
2011     // src -> dest iff dest == rax, else rax, <- dest
2012     // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
2013     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2014     __ jcc(Assembler::equal, lock_done);
2015 
2016     // Test if the oopMark is an obvious stack pointer, i.e.,
2017     //  1) (mark & 3) == 0, and
2018     //  2) rsp <= mark < mark + os::pagesize()
2019     // These 3 tests can be done by evaluating the following
2020     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2021     // assuming both stack pointer and pagesize have their
2022     // least significant 2 bits clear.
2023     // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
2024 
2025     __ subptr(swap_reg, rsp);
2026     __ andptr(swap_reg, 3 - os::vm_page_size());
2027 
2028     // Save the test result, for recursive case, the result is zero
2029     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2030     __ jcc(Assembler::notEqual, slow_path_lock);
2031     // Slow path will re-enter here
2032     __ bind(lock_done);
2033 
2034     if (UseBiasedLocking) {
2035       // Re-fetch oop_handle_reg as we trashed it above
2036       __ movptr(oop_handle_reg, Address(rsp, wordSize));
2037     }
2038   }
2039 
2040 
2041   // Finally just about ready to make the JNI call
2042 
2043 
2044   // get JNIEnv* which is first argument to native
2045   if (!is_critical_native) {
2046     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
2047     __ movptr(Address(rsp, 0), rdx);
2048   }
2049 
2050   // Now set thread in native
2051   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
2052 
2053   __ call(RuntimeAddress(native_func));
2054 
2055   // Verify or restore cpu control state after JNI call
2056   __ restore_cpu_control_state_after_jni();
2057 
2058   // WARNING - on Windows Java Natives use pascal calling convention and pop the
2059   // arguments off of the stack. We could just re-adjust the stack pointer here
2060   // and continue to do SP relative addressing but we instead switch to FP
2061   // relative addressing.
2062 
2063   // Unpack native results.
2064   switch (ret_type) {
2065   case T_BOOLEAN: __ c2bool(rax);            break;
2066   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
2067   case T_BYTE   : __ sign_extend_byte (rax); break;
2068   case T_SHORT  : __ sign_extend_short(rax); break;
2069   case T_INT    : /* nothing to do */        break;
2070   case T_DOUBLE :
2071   case T_FLOAT  :
2072     // Result is in st0 we'll save as needed
2073     break;
2074   case T_ARRAY:                 // Really a handle
2075   case T_OBJECT:                // Really a handle
2076       break; // can't de-handlize until after safepoint check
2077   case T_VOID: break;
2078   case T_LONG: break;
2079   default       : ShouldNotReachHere();
2080   }
2081 
2082   // Switch thread to "native transition" state before reading the synchronization state.
2083   // This additional state is necessary because reading and testing the synchronization
2084   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2085   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2086   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2087   //     Thread A is resumed to finish this native method, but doesn't block here since it
2088   //     didn't see any synchronization is progress, and escapes.
2089   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2090 
2091   if(os::is_MP()) {
2092     if (UseMembar) {
2093       // Force this write out before the read below
2094       __ membar(Assembler::Membar_mask_bits(
2095            Assembler::LoadLoad | Assembler::LoadStore |
2096            Assembler::StoreLoad | Assembler::StoreStore));
2097     } else {
2098       // Write serialization page so VM thread can do a pseudo remote membar.
2099       // We use the current thread pointer to calculate a thread specific
2100       // offset to write to within the page. This minimizes bus traffic
2101       // due to cache line collision.
2102       __ serialize_memory(thread, rcx);
2103     }
2104   }
2105 
2106   if (AlwaysRestoreFPU) {
2107     // Make sure the control word is correct.
2108     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2109   }
2110 
2111   Label after_transition;
2112 
2113   // check for safepoint operation in progress and/or pending suspend requests
2114   { Label Continue;
2115 
2116     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2117              SafepointSynchronize::_not_synchronized);
2118 
2119     Label L;
2120     __ jcc(Assembler::notEqual, L);
2121     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2122     __ jcc(Assembler::equal, Continue);
2123     __ bind(L);
2124 
2125     // Don't use call_VM as it will see a possible pending exception and forward it
2126     // and never return here preventing us from clearing _last_native_pc down below.
2127     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2128     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2129     // by hand.
2130     //
2131     __ vzeroupper();
2132 
2133     save_native_result(masm, ret_type, stack_slots);
2134     __ push(thread);
2135     if (!is_critical_native) {
2136       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2137                                               JavaThread::check_special_condition_for_native_trans)));
2138     } else {
2139       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2140                                               JavaThread::check_special_condition_for_native_trans_and_transition)));
2141     }
2142     __ increment(rsp, wordSize);
2143     // Restore any method result value
2144     restore_native_result(masm, ret_type, stack_slots);
2145 
2146     if (is_critical_native) {
2147       // The call above performed the transition to thread_in_Java so
2148       // skip the transition logic below.
2149       __ jmpb(after_transition);
2150     }
2151 
2152     __ bind(Continue);
2153   }
2154 
2155   // change thread state
2156   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
2157   __ bind(after_transition);
2158 
2159   Label reguard;
2160   Label reguard_done;
2161   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2162   __ jcc(Assembler::equal, reguard);
2163 
2164   // slow path reguard  re-enters here
2165   __ bind(reguard_done);
2166 
2167   // Handle possible exception (will unlock if necessary)
2168 
2169   // native result if any is live
2170 
2171   // Unlock
2172   Label slow_path_unlock;
2173   Label unlock_done;
2174   if (method->is_synchronized()) {
2175 
2176     Label done;
2177 
2178     // Get locked oop from the handle we passed to jni
2179     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2180 
2181     if (UseBiasedLocking) {
2182       __ biased_locking_exit(obj_reg, rbx, done);
2183     }
2184 
2185     // Simple recursive lock?
2186 
2187     __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2188     __ jcc(Assembler::equal, done);
2189 
2190     // Must save rax, if if it is live now because cmpxchg must use it
2191     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2192       save_native_result(masm, ret_type, stack_slots);
2193     }
2194 
2195     //  get old displaced header
2196     __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2197 
2198     // get address of the stack lock
2199     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2200 
2201     // Atomic swap old header if oop still contains the stack lock
2202     if (os::is_MP()) {
2203     __ lock();
2204     }
2205 
2206     // src -> dest iff dest == rax, else rax, <- dest
2207     // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2208     __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2209     __ jcc(Assembler::notEqual, slow_path_unlock);
2210 
2211     // slow path re-enters here
2212     __ bind(unlock_done);
2213     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2214       restore_native_result(masm, ret_type, stack_slots);
2215     }
2216 
2217     __ bind(done);
2218 
2219   }
2220 
2221   {
2222     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2223     // Tell dtrace about this method exit
2224     save_native_result(masm, ret_type, stack_slots);
2225     __ mov_metadata(rax, method());
2226     __ call_VM_leaf(
2227          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2228          thread, rax);
2229     restore_native_result(masm, ret_type, stack_slots);
2230   }
2231 
2232   // We can finally stop using that last_Java_frame we setup ages ago
2233 
2234   __ reset_last_Java_frame(thread, false);
2235 
2236   // Unbox oop result, e.g. JNIHandles::resolve value.
2237   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2238     __ resolve_jobject(rax /* value */,
2239                        thread /* thread */,
2240                        rcx /* tmp */);
2241   }
2242 
2243   if (CheckJNICalls) {
2244     // clear_pending_jni_exception_check
2245     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2246   }
2247 
2248   if (!is_critical_native) {
2249     // reset handle block
2250     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
2251     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
2252 
2253     // Any exception pending?
2254     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2255     __ jcc(Assembler::notEqual, exception_pending);
2256   }
2257 
2258   // no exception, we're almost done
2259 
2260   // check that only result value is on FPU stack
2261   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2262 
2263   // Fixup floating pointer results so that result looks like a return from a compiled method
2264   if (ret_type == T_FLOAT) {
2265     if (UseSSE >= 1) {
2266       // Pop st0 and store as float and reload into xmm register
2267       __ fstp_s(Address(rbp, -4));
2268       __ movflt(xmm0, Address(rbp, -4));
2269     }
2270   } else if (ret_type == T_DOUBLE) {
2271     if (UseSSE >= 2) {
2272       // Pop st0 and store as double and reload into xmm register
2273       __ fstp_d(Address(rbp, -8));
2274       __ movdbl(xmm0, Address(rbp, -8));
2275     }
2276   }
2277 
2278   // Return
2279 
2280   __ leave();
2281   __ ret(0);
2282 
2283   // Unexpected paths are out of line and go here
2284 
2285   // Slow path locking & unlocking
2286   if (method->is_synchronized()) {
2287 
2288     // BEGIN Slow path lock
2289 
2290     __ bind(slow_path_lock);
2291 
2292     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2293     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2294     __ push(thread);
2295     __ push(lock_reg);
2296     __ push(obj_reg);
2297     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2298     __ addptr(rsp, 3*wordSize);
2299 
2300 #ifdef ASSERT
2301     { Label L;
2302     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2303     __ jcc(Assembler::equal, L);
2304     __ stop("no pending exception allowed on exit from monitorenter");
2305     __ bind(L);
2306     }
2307 #endif
2308     __ jmp(lock_done);
2309 
2310     // END Slow path lock
2311 
2312     // BEGIN Slow path unlock
2313     __ bind(slow_path_unlock);
2314     __ vzeroupper();
2315     // Slow path unlock
2316 
2317     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2318       save_native_result(masm, ret_type, stack_slots);
2319     }
2320     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2321 
2322     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2323     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2324 
2325 
2326     // should be a peal
2327     // +wordSize because of the push above
2328     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2329     __ push(thread);
2330     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2331     __ push(rax);
2332 
2333     __ push(obj_reg);
2334     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2335     __ addptr(rsp, 3*wordSize);
2336 #ifdef ASSERT
2337     {
2338       Label L;
2339       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2340       __ jcc(Assembler::equal, L);
2341       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2342       __ bind(L);
2343     }
2344 #endif /* ASSERT */
2345 
2346     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2347 
2348     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2349       restore_native_result(masm, ret_type, stack_slots);
2350     }
2351     __ jmp(unlock_done);
2352     // END Slow path unlock
2353 
2354   }
2355 
2356   // SLOW PATH Reguard the stack if needed
2357 
2358   __ bind(reguard);
2359   __ vzeroupper();
2360   save_native_result(masm, ret_type, stack_slots);
2361   {
2362     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2363   }
2364   restore_native_result(masm, ret_type, stack_slots);
2365   __ jmp(reguard_done);
2366 
2367 
2368   // BEGIN EXCEPTION PROCESSING
2369 
2370   if (!is_critical_native) {
2371     // Forward  the exception
2372     __ bind(exception_pending);
2373 
2374     // remove possible return value from FPU register stack
2375     __ empty_FPU_stack();
2376 
2377     // pop our frame
2378     __ leave();
2379     // and forward the exception
2380     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2381   }
2382 
2383   __ flush();
2384 
2385   nmethod *nm = nmethod::new_native_nmethod(method,
2386                                             compile_id,
2387                                             masm->code(),
2388                                             vep_offset,
2389                                             frame_complete,
2390                                             stack_slots / VMRegImpl::slots_per_word,
2391                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2392                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2393                                             oop_maps);
2394 
2395   if (is_critical_native) {
2396     nm->set_lazy_critical_native(true);
2397   }
2398 
2399   return nm;
2400 
2401 }
2402 
2403 // this function returns the adjust size (in number of words) to a c2i adapter
2404 // activation for use during deoptimization
2405 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2406   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2407 }
2408 
2409 
2410 uint SharedRuntime::out_preserve_stack_slots() {
2411   return 0;
2412 }
2413 
2414 //------------------------------generate_deopt_blob----------------------------
2415 void SharedRuntime::generate_deopt_blob() {
2416   // allocate space for the code
2417   ResourceMark rm;
2418   // setup code generation tools
2419   // note: the buffer code size must account for StackShadowPages=50
2420   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2421   MacroAssembler* masm = new MacroAssembler(&buffer);
2422   int frame_size_in_words;
2423   OopMap* map = NULL;
2424   // Account for the extra args we place on the stack
2425   // by the time we call fetch_unroll_info
2426   const int additional_words = 2; // deopt kind, thread
2427 
2428   OopMapSet *oop_maps = new OopMapSet();
2429 
2430   // -------------
2431   // This code enters when returning to a de-optimized nmethod.  A return
2432   // address has been pushed on the the stack, and return values are in
2433   // registers.
2434   // If we are doing a normal deopt then we were called from the patched
2435   // nmethod from the point we returned to the nmethod. So the return
2436   // address on the stack is wrong by NativeCall::instruction_size
2437   // We will adjust the value to it looks like we have the original return
2438   // address on the stack (like when we eagerly deoptimized).
2439   // In the case of an exception pending with deoptimized then we enter
2440   // with a return address on the stack that points after the call we patched
2441   // into the exception handler. We have the following register state:
2442   //    rax,: exception
2443   //    rbx,: exception handler
2444   //    rdx: throwing pc
2445   // So in this case we simply jam rdx into the useless return address and
2446   // the stack looks just like we want.
2447   //
2448   // At this point we need to de-opt.  We save the argument return
2449   // registers.  We call the first C routine, fetch_unroll_info().  This
2450   // routine captures the return values and returns a structure which
2451   // describes the current frame size and the sizes of all replacement frames.
2452   // The current frame is compiled code and may contain many inlined
2453   // functions, each with their own JVM state.  We pop the current frame, then
2454   // push all the new frames.  Then we call the C routine unpack_frames() to
2455   // populate these frames.  Finally unpack_frames() returns us the new target
2456   // address.  Notice that callee-save registers are BLOWN here; they have
2457   // already been captured in the vframeArray at the time the return PC was
2458   // patched.
2459   address start = __ pc();
2460   Label cont;
2461 
2462   // Prolog for non exception case!
2463 
2464   // Save everything in sight.
2465 
2466   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2467   // Normal deoptimization
2468   __ push(Deoptimization::Unpack_deopt);
2469   __ jmp(cont);
2470 
2471   int reexecute_offset = __ pc() - start;
2472 
2473   // Reexecute case
2474   // return address is the pc describes what bci to do re-execute at
2475 
2476   // No need to update map as each call to save_live_registers will produce identical oopmap
2477   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2478 
2479   __ push(Deoptimization::Unpack_reexecute);
2480   __ jmp(cont);
2481 
2482   int exception_offset = __ pc() - start;
2483 
2484   // Prolog for exception case
2485 
2486   // all registers are dead at this entry point, except for rax, and
2487   // rdx which contain the exception oop and exception pc
2488   // respectively.  Set them in TLS and fall thru to the
2489   // unpack_with_exception_in_tls entry point.
2490 
2491   __ get_thread(rdi);
2492   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2493   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2494 
2495   int exception_in_tls_offset = __ pc() - start;
2496 
2497   // new implementation because exception oop is now passed in JavaThread
2498 
2499   // Prolog for exception case
2500   // All registers must be preserved because they might be used by LinearScan
2501   // Exceptiop oop and throwing PC are passed in JavaThread
2502   // tos: stack at point of call to method that threw the exception (i.e. only
2503   // args are on the stack, no return address)
2504 
2505   // make room on stack for the return address
2506   // It will be patched later with the throwing pc. The correct value is not
2507   // available now because loading it from memory would destroy registers.
2508   __ push(0);
2509 
2510   // Save everything in sight.
2511 
2512   // No need to update map as each call to save_live_registers will produce identical oopmap
2513   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2514 
2515   // Now it is safe to overwrite any register
2516 
2517   // store the correct deoptimization type
2518   __ push(Deoptimization::Unpack_exception);
2519 
2520   // load throwing pc from JavaThread and patch it as the return address
2521   // of the current frame. Then clear the field in JavaThread
2522   __ get_thread(rdi);
2523   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2524   __ movptr(Address(rbp, wordSize), rdx);
2525   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2526 
2527 #ifdef ASSERT
2528   // verify that there is really an exception oop in JavaThread
2529   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2530   __ verify_oop(rax);
2531 
2532   // verify that there is no pending exception
2533   Label no_pending_exception;
2534   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2535   __ testptr(rax, rax);
2536   __ jcc(Assembler::zero, no_pending_exception);
2537   __ stop("must not have pending exception here");
2538   __ bind(no_pending_exception);
2539 #endif
2540 
2541   __ bind(cont);
2542 
2543   // Compiled code leaves the floating point stack dirty, empty it.
2544   __ empty_FPU_stack();
2545 
2546 
2547   // Call C code.  Need thread and this frame, but NOT official VM entry
2548   // crud.  We cannot block on this call, no GC can happen.
2549   __ get_thread(rcx);
2550   __ push(rcx);
2551   // fetch_unroll_info needs to call last_java_frame()
2552   __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2553 
2554   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2555 
2556   // Need to have an oopmap that tells fetch_unroll_info where to
2557   // find any register it might need.
2558 
2559   oop_maps->add_gc_map( __ pc()-start, map);
2560 
2561   // Discard args to fetch_unroll_info
2562   __ pop(rcx);
2563   __ pop(rcx);
2564 
2565   __ get_thread(rcx);
2566   __ reset_last_Java_frame(rcx, false);
2567 
2568   // Load UnrollBlock into EDI
2569   __ mov(rdi, rax);
2570 
2571   // Move the unpack kind to a safe place in the UnrollBlock because
2572   // we are very short of registers
2573 
2574   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2575   // retrieve the deopt kind from the UnrollBlock.
2576   __ movl(rax, unpack_kind);
2577 
2578    Label noException;
2579   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2580   __ jcc(Assembler::notEqual, noException);
2581   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2582   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2583   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2584   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2585 
2586   __ verify_oop(rax);
2587 
2588   // Overwrite the result registers with the exception results.
2589   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2590   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2591 
2592   __ bind(noException);
2593 
2594   // Stack is back to only having register save data on the stack.
2595   // Now restore the result registers. Everything else is either dead or captured
2596   // in the vframeArray.
2597 
2598   RegisterSaver::restore_result_registers(masm);
2599 
2600   // Non standard control word may be leaked out through a safepoint blob, and we can
2601   // deopt at a poll point with the non standard control word. However, we should make
2602   // sure the control word is correct after restore_result_registers.
2603   __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2604 
2605   // All of the register save area has been popped of the stack. Only the
2606   // return address remains.
2607 
2608   // Pop all the frames we must move/replace.
2609   //
2610   // Frame picture (youngest to oldest)
2611   // 1: self-frame (no frame link)
2612   // 2: deopting frame  (no frame link)
2613   // 3: caller of deopting frame (could be compiled/interpreted).
2614   //
2615   // Note: by leaving the return address of self-frame on the stack
2616   // and using the size of frame 2 to adjust the stack
2617   // when we are done the return to frame 3 will still be on the stack.
2618 
2619   // Pop deoptimized frame
2620   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2621 
2622   // sp should be pointing at the return address to the caller (3)
2623 
2624   // Pick up the initial fp we should save
2625   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2626   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2627 
2628 #ifdef ASSERT
2629   // Compilers generate code that bang the stack by as much as the
2630   // interpreter would need. So this stack banging should never
2631   // trigger a fault. Verify that it does not on non product builds.
2632   if (UseStackBanging) {
2633     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2634     __ bang_stack_size(rbx, rcx);
2635   }
2636 #endif
2637 
2638   // Load array of frame pcs into ECX
2639   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2640 
2641   __ pop(rsi); // trash the old pc
2642 
2643   // Load array of frame sizes into ESI
2644   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2645 
2646   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2647 
2648   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2649   __ movl(counter, rbx);
2650 
2651   // Now adjust the caller's stack to make up for the extra locals
2652   // but record the original sp so that we can save it in the skeletal interpreter
2653   // frame and the stack walking of interpreter_sender will get the unextended sp
2654   // value and not the "real" sp value.
2655 
2656   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2657   __ movptr(sp_temp, rsp);
2658   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2659   __ subptr(rsp, rbx);
2660 
2661   // Push interpreter frames in a loop
2662   Label loop;
2663   __ bind(loop);
2664   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2665   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2666   __ pushptr(Address(rcx, 0));          // save return address
2667   __ enter();                           // save old & set new rbp,
2668   __ subptr(rsp, rbx);                  // Prolog!
2669   __ movptr(rbx, sp_temp);              // sender's sp
2670   // This value is corrected by layout_activation_impl
2671   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2672   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2673   __ movptr(sp_temp, rsp);              // pass to next frame
2674   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2675   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2676   __ decrementl(counter);             // decrement counter
2677   __ jcc(Assembler::notZero, loop);
2678   __ pushptr(Address(rcx, 0));          // save final return address
2679 
2680   // Re-push self-frame
2681   __ enter();                           // save old & set new rbp,
2682 
2683   //  Return address and rbp, are in place
2684   // We'll push additional args later. Just allocate a full sized
2685   // register save area
2686   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2687 
2688   // Restore frame locals after moving the frame
2689   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2690   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2691   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2692   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2693   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2694 
2695   // Set up the args to unpack_frame
2696 
2697   __ pushl(unpack_kind);                     // get the unpack_kind value
2698   __ get_thread(rcx);
2699   __ push(rcx);
2700 
2701   // set last_Java_sp, last_Java_fp
2702   __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2703 
2704   // Call C code.  Need thread but NOT official VM entry
2705   // crud.  We cannot block on this call, no GC can happen.  Call should
2706   // restore return values to their stack-slots with the new SP.
2707   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2708   // Set an oopmap for the call site
2709   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2710 
2711   // rax, contains the return result type
2712   __ push(rax);
2713 
2714   __ get_thread(rcx);
2715   __ reset_last_Java_frame(rcx, false);
2716 
2717   // Collect return values
2718   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2719   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2720 
2721   // Clear floating point stack before returning to interpreter
2722   __ empty_FPU_stack();
2723 
2724   // Check if we should push the float or double return value.
2725   Label results_done, yes_double_value;
2726   __ cmpl(Address(rsp, 0), T_DOUBLE);
2727   __ jcc (Assembler::zero, yes_double_value);
2728   __ cmpl(Address(rsp, 0), T_FLOAT);
2729   __ jcc (Assembler::notZero, results_done);
2730 
2731   // return float value as expected by interpreter
2732   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2733   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2734   __ jmp(results_done);
2735 
2736   // return double value as expected by interpreter
2737   __ bind(yes_double_value);
2738   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2739   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2740 
2741   __ bind(results_done);
2742 
2743   // Pop self-frame.
2744   __ leave();                              // Epilog!
2745 
2746   // Jump to interpreter
2747   __ ret(0);
2748 
2749   // -------------
2750   // make sure all code is generated
2751   masm->flush();
2752 
2753   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2754   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2755 }
2756 
2757 
2758 #ifdef COMPILER2
2759 //------------------------------generate_uncommon_trap_blob--------------------
2760 void SharedRuntime::generate_uncommon_trap_blob() {
2761   // allocate space for the code
2762   ResourceMark rm;
2763   // setup code generation tools
2764   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2765   MacroAssembler* masm = new MacroAssembler(&buffer);
2766 
2767   enum frame_layout {
2768     arg0_off,      // thread                     sp + 0 // Arg location for
2769     arg1_off,      // unloaded_class_index       sp + 1 // calling C
2770     arg2_off,      // exec_mode                  sp + 2
2771     // The frame sender code expects that rbp will be in the "natural" place and
2772     // will override any oopMap setting for it. We must therefore force the layout
2773     // so that it agrees with the frame sender code.
2774     rbp_off,       // callee saved register      sp + 3
2775     return_off,    // slot for return address    sp + 4
2776     framesize
2777   };
2778 
2779   address start = __ pc();
2780 
2781   if (UseRTMLocking) {
2782     // Abort RTM transaction before possible nmethod deoptimization.
2783     __ xabort(0);
2784   }
2785 
2786   // Push self-frame.
2787   __ subptr(rsp, return_off*wordSize);     // Epilog!
2788 
2789   // rbp, is an implicitly saved callee saved register (i.e. the calling
2790   // convention will save restore it in prolog/epilog) Other than that
2791   // there are no callee save registers no that adapter frames are gone.
2792   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2793 
2794   // Clear the floating point exception stack
2795   __ empty_FPU_stack();
2796 
2797   // set last_Java_sp
2798   __ get_thread(rdx);
2799   __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2800 
2801   // Call C code.  Need thread but NOT official VM entry
2802   // crud.  We cannot block on this call, no GC can happen.  Call should
2803   // capture callee-saved registers as well as return values.
2804   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2805   // argument already in ECX
2806   __ movl(Address(rsp, arg1_off*wordSize),rcx);
2807   __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2808   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2809 
2810   // Set an oopmap for the call site
2811   OopMapSet *oop_maps = new OopMapSet();
2812   OopMap* map =  new OopMap( framesize, 0 );
2813   // No oopMap for rbp, it is known implicitly
2814 
2815   oop_maps->add_gc_map( __ pc()-start, map);
2816 
2817   __ get_thread(rcx);
2818 
2819   __ reset_last_Java_frame(rcx, false);
2820 
2821   // Load UnrollBlock into EDI
2822   __ movptr(rdi, rax);
2823 
2824 #ifdef ASSERT
2825   { Label L;
2826     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2827             (int32_t)Deoptimization::Unpack_uncommon_trap);
2828     __ jcc(Assembler::equal, L);
2829     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
2830     __ bind(L);
2831   }
2832 #endif
2833 
2834   // Pop all the frames we must move/replace.
2835   //
2836   // Frame picture (youngest to oldest)
2837   // 1: self-frame (no frame link)
2838   // 2: deopting frame  (no frame link)
2839   // 3: caller of deopting frame (could be compiled/interpreted).
2840 
2841   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2842   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2843 
2844   // Pop deoptimized frame
2845   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2846   __ addptr(rsp, rcx);
2847 
2848   // sp should be pointing at the return address to the caller (3)
2849 
2850   // Pick up the initial fp we should save
2851   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2852   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2853 
2854 #ifdef ASSERT
2855   // Compilers generate code that bang the stack by as much as the
2856   // interpreter would need. So this stack banging should never
2857   // trigger a fault. Verify that it does not on non product builds.
2858   if (UseStackBanging) {
2859     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2860     __ bang_stack_size(rbx, rcx);
2861   }
2862 #endif
2863 
2864   // Load array of frame pcs into ECX
2865   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2866 
2867   __ pop(rsi); // trash the pc
2868 
2869   // Load array of frame sizes into ESI
2870   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2871 
2872   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2873 
2874   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2875   __ movl(counter, rbx);
2876 
2877   // Now adjust the caller's stack to make up for the extra locals
2878   // but record the original sp so that we can save it in the skeletal interpreter
2879   // frame and the stack walking of interpreter_sender will get the unextended sp
2880   // value and not the "real" sp value.
2881 
2882   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2883   __ movptr(sp_temp, rsp);
2884   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2885   __ subptr(rsp, rbx);
2886 
2887   // Push interpreter frames in a loop
2888   Label loop;
2889   __ bind(loop);
2890   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2891   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2892   __ pushptr(Address(rcx, 0));          // save return address
2893   __ enter();                           // save old & set new rbp,
2894   __ subptr(rsp, rbx);                  // Prolog!
2895   __ movptr(rbx, sp_temp);              // sender's sp
2896   // This value is corrected by layout_activation_impl
2897   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2898   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2899   __ movptr(sp_temp, rsp);              // pass to next frame
2900   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2901   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2902   __ decrementl(counter);             // decrement counter
2903   __ jcc(Assembler::notZero, loop);
2904   __ pushptr(Address(rcx, 0));            // save final return address
2905 
2906   // Re-push self-frame
2907   __ enter();                           // save old & set new rbp,
2908   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
2909 
2910 
2911   // set last_Java_sp, last_Java_fp
2912   __ get_thread(rdi);
2913   __ set_last_Java_frame(rdi, noreg, rbp, NULL);
2914 
2915   // Call C code.  Need thread but NOT official VM entry
2916   // crud.  We cannot block on this call, no GC can happen.  Call should
2917   // restore return values to their stack-slots with the new SP.
2918   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2919   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2920   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2921   // Set an oopmap for the call site
2922   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2923 
2924   __ get_thread(rdi);
2925   __ reset_last_Java_frame(rdi, true);
2926 
2927   // Pop self-frame.
2928   __ leave();     // Epilog!
2929 
2930   // Jump to interpreter
2931   __ ret(0);
2932 
2933   // -------------
2934   // make sure all code is generated
2935   masm->flush();
2936 
2937    _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2938 }
2939 #endif // COMPILER2
2940 
2941 //------------------------------generate_handler_blob------
2942 //
2943 // Generate a special Compile2Runtime blob that saves all registers,
2944 // setup oopmap, and calls safepoint code to stop the compiled code for
2945 // a safepoint.
2946 //
2947 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2948 
2949   // Account for thread arg in our frame
2950   const int additional_words = 1;
2951   int frame_size_in_words;
2952 
2953   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2954 
2955   ResourceMark rm;
2956   OopMapSet *oop_maps = new OopMapSet();
2957   OopMap* map;
2958 
2959   // allocate space for the code
2960   // setup code generation tools
2961   CodeBuffer   buffer("handler_blob", 1024, 512);
2962   MacroAssembler* masm = new MacroAssembler(&buffer);
2963 
2964   const Register java_thread = rdi; // callee-saved for VC++
2965   address start   = __ pc();
2966   address call_pc = NULL;
2967   bool cause_return = (poll_type == POLL_AT_RETURN);
2968   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2969 
2970   if (UseRTMLocking) {
2971     // Abort RTM transaction before calling runtime
2972     // because critical section will be large and will be
2973     // aborted anyway. Also nmethod could be deoptimized.
2974     __ xabort(0);
2975   }
2976 
2977   // If cause_return is true we are at a poll_return and there is
2978   // the return address on the stack to the caller on the nmethod
2979   // that is safepoint. We can leave this return on the stack and
2980   // effectively complete the return and safepoint in the caller.
2981   // Otherwise we push space for a return address that the safepoint
2982   // handler will install later to make the stack walking sensible.
2983   if (!cause_return)
2984     __ push(rbx);  // Make room for return address (or push it again)
2985 
2986   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2987 
2988   // The following is basically a call_VM. However, we need the precise
2989   // address of the call in order to generate an oopmap. Hence, we do all the
2990   // work ourselves.
2991 
2992   // Push thread argument and setup last_Java_sp
2993   __ get_thread(java_thread);
2994   __ push(java_thread);
2995   __ set_last_Java_frame(java_thread, noreg, noreg, NULL);
2996 
2997   // if this was not a poll_return then we need to correct the return address now.
2998   if (!cause_return) {
2999     __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
3000     __ movptr(Address(rbp, wordSize), rax);
3001   }
3002 
3003   // do the call
3004   __ call(RuntimeAddress(call_ptr));
3005 
3006   // Set an oopmap for the call site.  This oopmap will map all
3007   // oop-registers and debug-info registers as callee-saved.  This
3008   // will allow deoptimization at this safepoint to find all possible
3009   // debug-info recordings, as well as let GC find all oops.
3010 
3011   oop_maps->add_gc_map( __ pc() - start, map);
3012 
3013   // Discard arg
3014   __ pop(rcx);
3015 
3016   Label noException;
3017 
3018   // Clear last_Java_sp again
3019   __ get_thread(java_thread);
3020   __ reset_last_Java_frame(java_thread, false);
3021 
3022   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3023   __ jcc(Assembler::equal, noException);
3024 
3025   // Exception pending
3026   RegisterSaver::restore_live_registers(masm, save_vectors);
3027 
3028   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3029 
3030   __ bind(noException);
3031 
3032   // Normal exit, register restoring and exit
3033   RegisterSaver::restore_live_registers(masm, save_vectors);
3034 
3035   __ ret(0);
3036 
3037   // make sure all code is generated
3038   masm->flush();
3039 
3040   // Fill-out other meta info
3041   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3042 }
3043 
3044 //
3045 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3046 //
3047 // Generate a stub that calls into vm to find out the proper destination
3048 // of a java call. All the argument registers are live at this point
3049 // but since this is generic code we don't know what they are and the caller
3050 // must do any gc of the args.
3051 //
3052 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3053   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3054 
3055   // allocate space for the code
3056   ResourceMark rm;
3057 
3058   CodeBuffer buffer(name, 1000, 512);
3059   MacroAssembler* masm                = new MacroAssembler(&buffer);
3060 
3061   int frame_size_words;
3062   enum frame_layout {
3063                 thread_off,
3064                 extra_words };
3065 
3066   OopMapSet *oop_maps = new OopMapSet();
3067   OopMap* map = NULL;
3068 
3069   int start = __ offset();
3070 
3071   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
3072 
3073   int frame_complete = __ offset();
3074 
3075   const Register thread = rdi;
3076   __ get_thread(rdi);
3077 
3078   __ push(thread);
3079   __ set_last_Java_frame(thread, noreg, rbp, NULL);
3080 
3081   __ call(RuntimeAddress(destination));
3082 
3083 
3084   // Set an oopmap for the call site.
3085   // We need this not only for callee-saved registers, but also for volatile
3086   // registers that the compiler might be keeping live across a safepoint.
3087 
3088   oop_maps->add_gc_map( __ offset() - start, map);
3089 
3090   // rax, contains the address we are going to jump to assuming no exception got installed
3091 
3092   __ addptr(rsp, wordSize);
3093 
3094   // clear last_Java_sp
3095   __ reset_last_Java_frame(thread, true);
3096   // check for pending exceptions
3097   Label pending;
3098   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3099   __ jcc(Assembler::notEqual, pending);
3100 
3101   // get the returned Method*
3102   __ get_vm_result_2(rbx, thread);
3103   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
3104 
3105   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
3106 
3107   RegisterSaver::restore_live_registers(masm);
3108 
3109   // We are back the the original state on entry and ready to go.
3110 
3111   __ jmp(rax);
3112 
3113   // Pending exception after the safepoint
3114 
3115   __ bind(pending);
3116 
3117   RegisterSaver::restore_live_registers(masm);
3118 
3119   // exception pending => remove activation and forward to exception handler
3120 
3121   __ get_thread(thread);
3122   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
3123   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
3124   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3125 
3126   // -------------
3127   // make sure all code is generated
3128   masm->flush();
3129 
3130   // return the  blob
3131   // frame_size_words or bytes??
3132   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3133 }