/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vm_version_x86.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
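
// For illustration only (a sketch, assuming frame::arg_reg_save_area_bytes == 0,
// as on System V targets): the layout above then degenerates to
//   slots 0..1  saved rbp       (rbp_off, rbp_off2   -- one 8-byte word)
//   slots 2..3  return address  (return_off, return_off2)
// giving framesize == 4 compiler slots == 2 machine words.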

class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define XSAVE_AREA_BEGIN 160
#define XSAVE_AREA_YMM_BEGIN 576
#define XSAVE_AREA_ZMM_BEGIN 1152
#define XSAVE_AREA_UPPERBANK 1664
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    // 2..15 are implied in range usage
    ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_YMM_OFFS(0),
    DEF_YMM_OFFS(1),
    // 2..15 are implied in range usage
    zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_ZMM_OFFS(16),
    DEF_ZMM_OFFS(17),
    // 18..31 are implied in range usage
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
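
// Worked example (a sketch, not normative): assuming frame::arg_reg_save_area_bytes == 0
// as on System V targets, fpu_state_off == 0 and xmm_off == XSAVE_AREA_BEGIN/BytesPerInt == 40,
// so xmm0_offset_in_bytes() == 40 * BytesPerInt == 160 -- exactly the offset at which
// fxsave/xsave store xmm0 within the save area laid down by push_CPU_state().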

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int off = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  // Always make the frame size 16-byte aligned; both vector and non-vector
  // frames are always allocated this way.
  int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address, like a normal enter would leave it.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  // push_CPU_state() handles this on EVEX-enabled targets
  if (save_vectors) {
    // Save upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
    }
    if (VM_Version::supports_evex()) {
      // Save upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
      }
      // Save full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      int vector_len = Assembler::AVX_512bit;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Save upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
      }
    }
  }
  __ vzeroupper();
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
  // on EVEX-enabled targets it is included in the xsave area
  off = xmm0_off;
  int delta = xmm1_off - off;
  for (int n = 0; n < 16; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    off += delta;
  }
  if (UseAVX > 2) {
    // Obtain xmm16..xmm31 from the XSAVE area on EVEX-enabled targets
    off = zmm16_off;
    delta = zmm17_off - off;
    for (int n = 16; n < num_xmm_regs; n++) {
      XMMRegister zmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
      off += delta;
    }
  }

#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    off = ymm0_off;
    int delta = ymm1_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister ymm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
      off += delta;
    }
  }
#endif // COMPILER2_OR_JVMCI

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
    // on EVEX-enabled targets it is included in the xsave area
    off = xmm0H_off;
    delta = xmm1H_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
      off += delta;
    }
    if (UseAVX > 2) {
      // Obtain xmm16..xmm31 from the XSAVE area on EVEX-enabled targets
      off = zmm16H_off;
      delta = zmm17H_off - off;
      for (int n = 16; n < num_xmm_regs; n++) {
        XMMRegister zmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
        off += delta;
      }
    }
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

#if COMPILER2_OR_JVMCI
  if (restore_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  __ vzeroupper();

  // On EVEX-enabled targets everything is handled by pop_CPU_state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
    }
    if (VM_Version::supports_evex()) {
      // Restore upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
      }
      // Restore full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      int vector_len = Assembler::AVX_512bit;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Restore upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
      }
    }
  }

  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result registers
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}
 398 
 399 // Is vector's size (in bytes) bigger than a size saved by default?
 400 // 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
 401 bool SharedRuntime::is_wide_vector(int size) {
 402   return size > 16;
 403 }
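
// For example, a 32-byte YMM vector is "wide" (fxsave/fxrstor only cover the
// 16-byte XMM halves), while a 16-byte XMM vector is not; only wide vectors
// need the extra save/restore paths in RegisterSaver above.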

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
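
// Sketch of the bias: an incoming stack argument in the first slot has
// reg2stack() == 0, so reg2offset_in() == (0 + 4) * 4 == 16 bytes -- exactly
// enough to step over the saved rbp and the return address (two 8-byte words).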

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_VALUETYPEPTR:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}
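
// Worked example (a sketch, assuming the System V j_rarg aliases where
// j_rarg0..j_rarg5 are rsi, rdx, rcx, r8, r9, rdi): for a signature of
// (int, long, Object, double), sig_bt is
//   T_INT  T_LONG T_VOID  T_OBJECT  T_DOUBLE T_VOID
// and the convention above assigns
//   T_INT    -> j_rarg0 (rsi)      T_LONG   -> j_rarg1 (rdx)
//   T_OBJECT -> j_rarg2 (rcx)      T_DOUBLE -> j_farg0 (xmm0)
// with each trailing T_VOID half marked BAD and no stack slots used.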

// Same as java_calling_convention() but for multiple return
// values. There's no way to store them on the stack so if we don't
// have enough registers, multiple values can't be returned.
const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
int SharedRuntime::java_return_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
                                          int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[java_return_convention_max_int] = {
    rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
  };
  static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j+1) {
        regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
    case T_VALUETYPEPTR:
      if (int_args < Argument::n_int_register_parameters_j+1) {
        regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return int_args + fp_args;
}
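
// Sketch: returning two ints plus a float consumes INT_ArgReg[0..1] and
// FP_ArgReg[0], i.e. rax then j_rarg5 for the ints and j_farg0 (xmm0) for the
// float, and the function reports 3 values returned; an eighth int value
// would exhaust INT_ArgReg (java_return_convention_max_int == 7) and make
// the convention return -1.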

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();
  __ vzeroupper();
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ vzeroupper();
  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}
 650 
 651 // For each value type argument, sig includes the list of fields of
 652 // the value type. This utility function computes the number of
 653 // arguments for the call if value types are passed by reference (the
 654 // calling convention the interpreter expects).
 655 static int compute_total_args_passed_int(const GrowableArray<SigEntry>& sig_extended) {
 656   int total_args_passed = 0;
 657   if (ValueTypePassFieldsAsArgs) {
 658     for (int i = 0; i < sig_extended.length(); i++) {
 659       BasicType bt = sig_extended.at(i)._bt;
 660       if (bt == T_VALUETYPE) {
 661         // In sig_extended, a value type argument starts with:
 662         // T_VALUETYPE, followed by the types of the fields of the
 663         // value type and T_VOID to mark the end of the value
 664         // type. Value types are flattened so, for instance, in the
 665         // case of a value type with an int field and a value type
 666         // field that itself has 2 fields, an int and a long:
 667         // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second
 668         // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID
 669         // (outer T_VALUETYPE)
 670         total_args_passed++;
 671         int vt = 1;
 672         do {
 673           i++;
 674           BasicType bt = sig_extended.at(i)._bt;
 675           BasicType prev_bt = sig_extended.at(i-1)._bt;
 676           if (bt == T_VALUETYPE) {
 677             vt++;
 678           } else if (bt == T_VOID &&
 679                      prev_bt != T_LONG &&
 680                      prev_bt != T_DOUBLE) {
 681             vt--;
 682           }
 683         } while (vt != 0);
 684       } else {
 685         total_args_passed++;
 686       }
 687     }
 688   } else {
 689     total_args_passed = sig_extended.length();
 690   }
 691   return total_args_passed;
 692 }
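
// To make the counting concrete: for the nested example in the comment above,
//   T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID T_VOID T_VOID
// the do/while loop tracks the nesting depth vt (1, then 2 at the inner
// T_VALUETYPE, back to 1 and finally 0 at the two closing T_VOIDs; the T_VOID
// right after T_LONG is skipped as a long half), so the whole sequence counts
// as a single interpreter argument.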

static void gen_c2i_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& to,
                                   int extraspace,
                                   bool is_oop) {
  assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
  if (bt == T_VOID) {
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }

  // Say 4 args:
  // i   st_off
  // 0   32 T_LONG
  // 1   24 T_VOID
  // 2   16 T_OBJECT
  // 3    8 T_BOOL
  // -    0 return address
  //
  // However, to make things extra confusing: because we can fit a long/double
  // in a single slot on a 64-bit VM and it would be silly to break them up,
  // the interpreter leaves one slot empty and only stores to a single slot.
  // In this case the slot that is occupied is the T_VOID slot. See, I said it
  // was confusing.

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "must be invalid");
    return;
  }

  if (!r_1->is_XMMRegister()) {
    Register val = rax;
    assert_different_registers(to.base(), val);
    if (r_1->is_stack()) {
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
    } else {
      val = r_1->as_Register();
    }
    if (is_oop) {
      __ store_heap_oop(to, val);
    } else {
      __ store_sized_value(to, val, size_in_bytes);
    }
  } else {
    if (wide) {
      __ movdbl(to, r_1->as_XMMRegister());
    } else {
      __ movflt(to, r_1->as_XMMRegister());
    }
  }
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>& sig_extended,
                            const VMRegPair *regs,
                            Label& skip_fixup,
                            address start,
                            OopMapSet*& oop_maps,
                            int& frame_complete,
                            int& frame_size_in_words) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  bool has_value_argument = false;
  if (ValueTypePassFieldsAsArgs) {
    // Is there a value type argument?
    for (int i = 0; i < sig_extended.length() && !has_value_argument; i++) {
      has_value_argument = (sig_extended.at(i)._bt == T_VALUETYPE);
    }
    if (has_value_argument) {
      // There is at least one value type argument: we're coming from
      // compiled code so we have no buffers to back the value
      // types. Allocate the buffers here with a runtime call.
      oop_maps = new OopMapSet();
      OopMap* map = NULL;

      map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

      frame_complete = __ offset();

      __ set_last_Java_frame(noreg, noreg, NULL);

      __ mov(c_rarg0, r15_thread);
      __ mov(c_rarg1, rbx);

      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types)));

      oop_maps->add_gc_map((int)(__ pc() - start), map);
      __ reset_last_Java_frame(false);

      RegisterSaver::restore_live_registers(masm);

      Label no_exception;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, no_exception);

      __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
      __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
      __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

      __ bind(no_exception);

      // We get an array of objects from the runtime call
      __ get_vm_result(r13, r15_thread); // Use r13 as temporary because r10 is trashed by movptr()
      __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
      __ mov(r10, r13);
    }
  }

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus one word
  // for the return address location, since we store it first rather
  // than hold it in rax across all the shuffling.
  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);
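
  // Sketch of the arithmetic (assuming the 64-bit Interpreter::stackElementSize
  // of one word, i.e. 8 bytes): for 3 interpreter arguments, extraspace is
  // 3*8 + 8 == 32 bytes, already 16-byte aligned; for 4 arguments it is
  // 4*8 + 8 == 40, rounded up to 48 by the align_up above.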

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space

  // next_arg_comp is the next argument from the compiler point of
  // view (value type fields are passed in registers/on the stack). In
  // sig_extended, a value type argument starts with: T_VALUETYPE,
  // followed by the types of the fields of the value type and T_VOID
  // to mark the end of the value type. ignored counts the number of
  // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument:
  // used to get the buffer for that argument from the pool of buffers
  // we allocated above and want to pass to the
  // interpreter. next_arg_int is the next argument from the
  // interpreter point of view (value types are passed by reference).
  bool has_oop_field = false;
  for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
       next_arg_comp < sig_extended.length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int < total_args_passed, "more arguments for the interpreter than expected?");
    BasicType bt = sig_extended.at(next_arg_comp)._bt;
    int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
    if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
      int next_off = st_off - Interpreter::stackElementSize;
      const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
      next_arg_int++;
#ifdef ASSERT
      if (bt == T_LONG || bt == T_DOUBLE) {
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
      }
#endif /* ASSERT */
    } else {
      ignored++;
      // get the buffer from the just allocated pool of buffers
      int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE);
      __ load_heap_oop(r11, Address(r10, index));
      next_vt_arg++; next_arg_int++;
      int vt = 1;
      // write fields we get from compiled code in registers/stack
      // slots to the buffer: we know we are done with that value type
      // argument when we hit the T_VOID that acts as an end of value
      // type delimiter for this value type. Value types are flattened
      // so we might encounter embedded value types. Each entry in
      // sig_extended contains a field offset in the buffer.
      do {
        next_arg_comp++;
        BasicType bt = sig_extended.at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
        if (bt == T_VALUETYPE) {
          vt++;
          ignored++;
        } else if (bt == T_VOID &&
                   prev_bt != T_LONG &&
                   prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else {
          int off = sig_extended.at(next_arg_comp)._offset;
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
          has_oop_field = has_oop_field || is_oop;
          gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
                                 size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop);
        }
      } while (vt != 0);
      // pass the buffer to the interpreter
      __ movptr(Address(rsp, st_off), r11);
    }
  }

  // If a value type was allocated and initialized, apply post barrier to all oop fields
  if (has_value_argument && has_oop_field) {
    __ push(r13); // save senderSP
    __ push(rbx); // save callee
    // Allocate argument register save area
    if (frame::arg_reg_save_area_bytes != 0) {
      __ subptr(rsp, frame::arg_reg_save_area_bytes);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10);
    // De-allocate argument register save area
    if (frame::arg_reg_save_area_bytes != 0) {
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
    __ pop(rbx); // restore callee
    __ pop(r13); // restore senderSP
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

static void gen_i2c_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& from,
                                   bool is_oop) {
  assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
  if (bt == T_VOID) {
    // Longs and doubles are passed in native word order, but misaligned
    // in the 32-bit build.
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }
  assert(!reg_pair.second()->is_valid() || reg_pair.first()->next() == reg_pair.second(),
         "scrambled load targets?");

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "must be invalid");
    return;
  }

  bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
  if (!r_1->is_XMMRegister()) {
    // We can use r13 as a temp here because compiled code doesn't need r13 as
    // an input, and if we end up going through a c2i because of a miss a
    // reasonable value of r13 will be generated.
    Register dst = r_1->is_stack() ? r13 : r_1->as_Register();
    if (is_oop) {
      __ load_heap_oop(dst, from);
    } else {
      __ load_sized_value(dst, from, size_in_bytes, is_signed);
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
      __ movq(Address(rsp, st_off), dst);
    }
  } else {
    if (wide) {
      __ movdbl(r_1->as_XMMRegister(), from);
    } else {
      __ movflt(r_1->as_XMMRegister(), from);
    }
  }
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int comp_args_on_stack,
                                    const GrowableArray<SigEntry>& sig_extended,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry; otherwise we
  // lose the alignment expected by all compiled code, and register
  // save code can segv when fxsave instructions find an improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }
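
  // Sketch of the arithmetic: with comp_args_on_stack == 3 (three 4-byte
  // slots), 3*4 == 12 bytes rounds up to 16 == 2 words, which is already
  // 2-word aligned, so rsp is dropped by 16 bytes here.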

  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address and misalign the stack just as the youngest
  // frame always expects to see it, i.e. as if a call instruction had just
  // been executed.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    Label no_alternative_target;
    __ jcc(Assembler::equal, no_alternative_target);
    __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.

  // next_arg_comp is the next argument from the compiler point of
  // view (value type fields are passed in registers/on the stack). In
  // sig_extended, a value type argument starts with: T_VALUETYPE,
  // followed by the types of the fields of the value type and T_VOID
  // to mark the end of the value type. ignored counts the number of
  // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the
  // interpreter point of view (value types are passed by reference).
  for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig_extended.length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int < total_args_passed, "more arguments from the interpreter than expected?");
    BasicType bt = sig_extended.at(next_arg_comp)._bt;
    int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize;
    if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
      // Load in argument order going down.
      // Point to interpreter value (vs. tag)
      int next_off = ld_off - Interpreter::stackElementSize;
      int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(saved_sp, offset), false);
      next_arg_int++;
    } else {
      next_arg_int++;
      ignored++;
      // get the buffer for that value type
      __ movptr(r10, Address(saved_sp, ld_off));
      int vt = 1;
      // load fields to registers/stack slots from the buffer: we know
      // we are done with that value type argument when we hit the
      // T_VOID that acts as an end of value type delimiter for this
      // value type. Value types are flattened so we might encounter
      // embedded value types. Each entry in sig_extended contains a
      // field offset in the buffer.
      do {
        next_arg_comp++;
        BasicType bt = sig_extended.at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
        if (bt == T_VALUETYPE) {
          vt++;
          ignored++;
        } else if (bt == T_VOID &&
                   prev_bt != T_LONG &&
                   prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else {
          int off = sig_extended.at(next_arg_comp)._offset;
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
          gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop);
        }
      } while (vt != 0);
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put the Method* where a c2i would expect it, should we end up there.
  // Only needed because c2's resolve stubs return the Method* as a result
  // in rax.
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int comp_args_on_stack,
                                                            const GrowableArray<SigEntry>& sig_extended,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint,
                                                            AdapterBlob*& new_adapter) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  OopMapSet* oop_maps = NULL;
  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;
  gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);

  __ flush();
  new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
1239 
1240   // If the method has value types arguments, save the extended signature as symbol in
1241   // the AdapterHandlerEntry to be used for scalarization of value type arguments.
1242   Symbol* extended_signature = NULL;
1243   bool has_value_argument = false;
1244   Thread* THREAD = Thread::current();
1245   ResourceMark rm(THREAD);
1246   int length = sig_extended.length();
1247   char* sig_str = NEW_RESOURCE_ARRAY(char, 2*length + 3);
1248   int idx = 0;
1249   sig_str[idx++] = '(';
1250   for (int index = 0; index < length; index++) {
1251     BasicType bt = sig_extended.at(index)._bt;
1252     if (bt == T_VALUETYPE) {
1253       has_value_argument = true;
1254     } else if (bt == T_VALUETYPEPTR) {
1255       has_value_argument = true;
1256       // non-flattened value type field
1257       sig_str[idx++] = type2char(T_VALUETYPE);
1258       sig_str[idx++] = ';';
1259     } else if (bt == T_VOID) {
1260       // Ignore
1261     } else {
1262       if (bt == T_ARRAY) {
1263         bt = T_OBJECT; // We don't know the element type, treat as Object
1264       }
1265       sig_str[idx++] = type2char(bt);
1266       if (bt == T_OBJECT) {
1267         sig_str[idx++] = ';';
1268       }
1269     }
1270   }
1271   sig_str[idx++] = ')';
1272   sig_str[idx++] = '\0';
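  // The result is a pseudo-signature: e.g. a method taking (int, String, long)
  // yields "(IL;J)". Only basic types are recorded; class names are not needed.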
1273   if (has_value_argument) {
1274     // Extended signature is only required if a value type argument is passed
1275     extended_signature = SymbolTable::new_permanent_symbol(sig_str, THREAD);
1276   }
1277 
1278   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, extended_signature);
1279 }
1280 
1281 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1282                                          VMRegPair *regs,
1283                                          VMRegPair *regs2,
1284                                          int total_args_passed) {
1285   assert(regs2 == NULL, "not needed on x86");
// We return the number of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
1288 
1289 // NOTE: These arrays will have to change when c1 is ported
1290 #ifdef _WIN64
1291     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1292       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1293     };
1294     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1295       c_farg0, c_farg1, c_farg2, c_farg3
1296     };
1297 #else
1298     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1299       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1300     };
1301     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1302       c_farg0, c_farg1, c_farg2, c_farg3,
1303       c_farg4, c_farg5, c_farg6, c_farg7
1304     };
1305 #endif // _WIN64
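    // Example: for foo(int a, double b, long c), the SysV convention assigns
    // a -> c_rarg0 (rdi), b -> c_farg0 (xmm0) and c -> c_rarg1 (rsi). On
    // Windows the int and float sequences share position slots, so b would
    // take xmm1 and c would take r8; the int_args++/fp_args++ bumps below
    // model that sharing.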
1306 
1307 
1308     uint int_args = 0;
1309     uint fp_args = 0;
1310     uint stk_args = 0; // inc by 2 each time
1311 
1312     for (int i = 0; i < total_args_passed; i++) {
1313       switch (sig_bt[i]) {
1314       case T_BOOLEAN:
1315       case T_CHAR:
1316       case T_BYTE:
1317       case T_SHORT:
1318       case T_INT:
1319         if (int_args < Argument::n_int_register_parameters_c) {
1320           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1321 #ifdef _WIN64
1322           fp_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1324           stk_args += 2;
1325 #endif
1326         } else {
1327           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1328           stk_args += 2;
1329         }
1330         break;
1331       case T_LONG:
1332         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1333         // fall through
1334       case T_OBJECT:
1335       case T_ARRAY:
1336       case T_ADDRESS:
1337       case T_METADATA:
1338         if (int_args < Argument::n_int_register_parameters_c) {
1339           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1340 #ifdef _WIN64
1341           fp_args++;
1342           stk_args += 2;
1343 #endif
1344         } else {
1345           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1346           stk_args += 2;
1347         }
1348         break;
1349       case T_FLOAT:
1350         if (fp_args < Argument::n_float_register_parameters_c) {
1351           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1352 #ifdef _WIN64
1353           int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1355           stk_args += 2;
1356 #endif
1357         } else {
1358           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1359           stk_args += 2;
1360         }
1361         break;
1362       case T_DOUBLE:
1363         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1364         if (fp_args < Argument::n_float_register_parameters_c) {
1365           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1366 #ifdef _WIN64
1367           int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1369           stk_args += 2;
1370 #endif
1371         } else {
1372           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1373           stk_args += 2;
1374         }
1375         break;
1376       case T_VOID: // Halves of longs and doubles
1377         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1378         regs[i].set_bad();
1379         break;
1380       default:
1381         ShouldNotReachHere();
1382         break;
1383       }
1384     }
1385 #ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
1388   if (stk_args < 8) {
1389     stk_args = 8;
1390   }
1391 #endif // _WIN64
1392 
1393   return stk_args;
1394 }
1395 
// On 64-bit we store integer-like items to the stack as 64-bit items
// (as the SPARC ABI does), even though Java would only store 32 bits for a
// parameter. On 32-bit it would simply be 32 bits. So this routine does
// 32->32 on 32-bit and 32->64 on 64-bit.
1400 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1401   if (src.first()->is_stack()) {
1402     if (dst.first()->is_stack()) {
1403       // stack to stack
1404       __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
1405       __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1406     } else {
1407       // stack to reg
1408       __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1409     }
1410   } else if (dst.first()->is_stack()) {
1411     // reg to stack
1412     // Do we really have to sign extend???
1413     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
1414     __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1415   } else {
1416     // Do we really have to sign extend???
1417     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
1418     if (dst.first() != src.first()) {
1419       __ movq(dst.first()->as_Register(), src.first()->as_Register());
1420     }
1421   }
1422 }
1423 
1424 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1425   if (src.first()->is_stack()) {
1426     if (dst.first()->is_stack()) {
1427       // stack to stack
1428       __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1429       __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1430     } else {
1431       // stack to reg
1432       __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1433     }
1434   } else if (dst.first()->is_stack()) {
1435     // reg to stack
1436     __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1437   } else {
1438     if (dst.first() != src.first()) {
1439       __ movq(dst.first()->as_Register(), src.first()->as_Register());
1440     }
1441   }
1442 }
1443 
// An oop arg. Must pass a handle, not the oop itself.
1445 static void object_move(MacroAssembler* masm,
1446                         OopMap* map,
1447                         int oop_handle_offset,
1448                         int framesize_in_slots,
1449                         VMRegPair src,
1450                         VMRegPair dst,
1451                         bool is_receiver,
1452                         int* receiver_offset) {
1453 
1454   // must pass a handle. First figure out the location we use as a handle
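  // A JNI handle is just the address of a stack slot holding the oop; a NULL
  // oop must be passed as a NULL handle, hence the cmpptr/cmovptr pairs below.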
1455 
1456   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
1457 
  // See if the oop is NULL; if it is we need no handle
1459 
1460   if (src.first()->is_stack()) {
1461 
1462     // Oop is already on the stack as an argument
1463     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1464     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1465     if (is_receiver) {
1466       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1467     }
1468 
1469     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1470     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1471     // conditionally move a NULL
1472     __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
1473   } else {
1474 
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop handles and pass a handle if the oop is non-NULL
1477 
1478     const Register rOop = src.first()->as_Register();
1479     int oop_slot;
1480     if (rOop == j_rarg0)
1481       oop_slot = 0;
1482     else if (rOop == j_rarg1)
1483       oop_slot = 1;
1484     else if (rOop == j_rarg2)
1485       oop_slot = 2;
1486     else if (rOop == j_rarg3)
1487       oop_slot = 3;
1488     else if (rOop == j_rarg4)
1489       oop_slot = 4;
1490     else {
1491       assert(rOop == j_rarg5, "wrong register");
1492       oop_slot = 5;
1493     }
1494 
1495     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
1496     int offset = oop_slot*VMRegImpl::stack_slot_size;
1497 
1498     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1499     // Store oop in handle area, may be NULL
1500     __ movptr(Address(rsp, offset), rOop);
1501     if (is_receiver) {
1502       *receiver_offset = offset;
1503     }
1504 
1505     __ cmpptr(rOop, (int32_t)NULL_WORD);
1506     __ lea(rHandle, Address(rsp, offset));
1507     // conditionally move a NULL from the handle area where it was just stored
1508     __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
1509   }
1510 
  // If the arg is destined for the stack, store the handle there; otherwise it is already in the correct register.
1512   if (dst.first()->is_stack()) {
1513     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1514   }
1515 }
1516 
// A float arg. May use an integer register (rax) for a stack-to-stack copy,
// otherwise moves through XMM registers.
1518 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1519   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1520 
  // The calling conventions assure us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1524 
1525   if (src.first()->is_stack()) {
1526     if (dst.first()->is_stack()) {
1527       __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1528       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1529     } else {
1530       // stack to reg
1531       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1532       __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1533     }
1534   } else if (dst.first()->is_stack()) {
1535     // reg to stack
1536     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1537     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1538   } else {
1539     // reg to reg
1540     // In theory these overlap but the ordering is such that this is likely a nop
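    // movdbl copies all 64 bits of the XMM register; for a float only the low
    // 32 bits are meaningful, so the wider copy is harmless.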
1541     if ( src.first() != dst.first()) {
1542       __ movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
1543     }
1544   }
1545 }
1546 
1547 // A long move
1548 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1549 
  // The calling conventions assure us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1553 
1554   if (src.is_single_phys_reg() ) {
1555     if (dst.is_single_phys_reg()) {
1556       if (dst.first() != src.first()) {
1557         __ mov(dst.first()->as_Register(), src.first()->as_Register());
1558       }
1559     } else {
1560       assert(dst.is_single_reg(), "not a stack pair");
1561       __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1562     }
1563   } else if (dst.is_single_phys_reg()) {
1564     assert(src.is_single_reg(),  "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1566   } else {
1567     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1568     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1569     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1570   }
1571 }
1572 
1573 // A double move
1574 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1575 
  // The calling conventions assure us that each VMRegPair is either
  // entirely one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to SPARC.
1579 
1580   if (src.is_single_phys_reg() ) {
1581     if (dst.is_single_phys_reg()) {
1582       // In theory these overlap but the ordering is such that this is likely a nop
1583       if ( src.first() != dst.first()) {
1584         __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1585       }
1586     } else {
1587       assert(dst.is_single_reg(), "not a stack pair");
1588       __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1589     }
1590   } else if (dst.is_single_phys_reg()) {
1591     assert(src.is_single_reg(),  "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1593   } else {
1594     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1595     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1596     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1597   }
1598 }
1599 
1600 
1601 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1604   switch (ret_type) {
1605   case T_FLOAT:
1606     __ movflt(Address(rbp, -wordSize), xmm0);
1607     break;
1608   case T_DOUBLE:
1609     __ movdbl(Address(rbp, -wordSize), xmm0);
1610     break;
1611   case T_VOID:  break;
1612   default: {
1613     __ movptr(Address(rbp, -wordSize), rax);
1614     }
1615   }
1616 }
1617 
1618 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1621   switch (ret_type) {
1622   case T_FLOAT:
1623     __ movflt(xmm0, Address(rbp, -wordSize));
1624     break;
1625   case T_DOUBLE:
1626     __ movdbl(xmm0, Address(rbp, -wordSize));
1627     break;
1628   case T_VOID:  break;
1629   default: {
1630     __ movptr(rax, Address(rbp, -wordSize));
1631     }
1632   }
1633 }
1634 
1635 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
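    // Spill the live outgoing argument registers to the stack around a VM
    // call; XMM args get a full double word so movdbl works for both float
    // and double values. restore_args below is the exact mirror image.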
1636     for ( int i = first_arg ; i < arg_count ; i++ ) {
1637       if (args[i].first()->is_Register()) {
1638         __ push(args[i].first()->as_Register());
1639       } else if (args[i].first()->is_XMMRegister()) {
1640         __ subptr(rsp, 2*wordSize);
1641         __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1642       }
1643     }
1644 }
1645 
1646 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1647     for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1648       if (args[i].first()->is_Register()) {
1649         __ pop(args[i].first()->as_Register());
1650       } else if (args[i].first()->is_XMMRegister()) {
1651         __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1652         __ addptr(rsp, 2*wordSize);
1653       }
1654     }
1655 }
1656 
1657 
1658 static void save_or_restore_arguments(MacroAssembler* masm,
1659                                       const int stack_slots,
1660                                       const int total_in_args,
1661                                       const int arg_save_area,
1662                                       OopMap* map,
1663                                       VMRegPair* in_regs,
1664                                       BasicType* in_sig_bt) {
  // If map is non-NULL the code should store the values; otherwise it
  // should load them.
1667   int slot = arg_save_area;
  // Save down double-word registers first
1669   for ( int i = 0; i < total_in_args; i++) {
1670     if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1671       int offset = slot * VMRegImpl::stack_slot_size;
1672       slot += VMRegImpl::slots_per_word;
1673       assert(slot <= stack_slots, "overflow");
1674       if (map != NULL) {
1675         __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1676       } else {
1677         __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1678       }
1679     }
1680     if (in_regs[i].first()->is_Register() &&
1681         (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1682       int offset = slot * VMRegImpl::stack_slot_size;
1683       if (map != NULL) {
1684         __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1685         if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
1687         }
1688       } else {
1689         __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1690       }
1691       slot += VMRegImpl::slots_per_word;
1692     }
1693   }
1694   // Save or restore single word registers
1695   for ( int i = 0; i < total_in_args; i++) {
1696     if (in_regs[i].first()->is_Register()) {
1697       int offset = slot * VMRegImpl::stack_slot_size;
1698       slot++;
1699       assert(slot <= stack_slots, "overflow");
1700 
      // Value is in an input register; we must flush it to the stack.
1702       const Register reg = in_regs[i].first()->as_Register();
1703       switch (in_sig_bt[i]) {
1704         case T_BOOLEAN:
1705         case T_CHAR:
1706         case T_BYTE:
1707         case T_SHORT:
1708         case T_INT:
1709           if (map != NULL) {
1710             __ movl(Address(rsp, offset), reg);
1711           } else {
1712             __ movl(reg, Address(rsp, offset));
1713           }
1714           break;
1715         case T_ARRAY:
1716         case T_LONG:
1717           // handled above
1718           break;
1719         case T_OBJECT:
1720         default: ShouldNotReachHere();
1721       }
1722     } else if (in_regs[i].first()->is_XMMRegister()) {
1723       if (in_sig_bt[i] == T_FLOAT) {
1724         int offset = slot * VMRegImpl::stack_slot_size;
1725         slot++;
1726         assert(slot <= stack_slots, "overflow");
1727         if (map != NULL) {
1728           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1729         } else {
1730           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1731         }
1732       }
1733     } else if (in_regs[i].first()->is_stack()) {
1734       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1735         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1736         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1737       }
1738     }
1739   }
1740 }
1741 
1742 // Pin object, return pinned object or null in rax
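// Heaps that support object pinning let critical natives pin their array
// arguments in place instead of taking the GCLocker slow path in
// check_needs_gc_for_critical_native below.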
1743 static void gen_pin_object(MacroAssembler* masm,
1744                            VMRegPair reg) {
1745   __ block_comment("gen_pin_object {");
1746 
1747   // rax always contains oop, either incoming or
1748   // pinned.
1749   Register tmp_reg = rax;
1750 
1751   Label is_null;
1752   VMRegPair tmp;
1753   VMRegPair in_reg = reg;
1754 
1755   tmp.set_ptr(tmp_reg->as_VMReg());
1756   if (reg.first()->is_stack()) {
1757     // Load the arg up from the stack
1758     move_ptr(masm, reg, tmp);
1759     reg = tmp;
1760   } else {
1761     __ movptr(rax, reg.first()->as_Register());
1762   }
1763   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1764   __ jccb(Assembler::equal, is_null);
1765 
1766   if (reg.first()->as_Register() != c_rarg1) {
1767     __ movptr(c_rarg1, reg.first()->as_Register());
1768   }
1769 
1770   __ call_VM_leaf(
1771     CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1772     r15_thread, c_rarg1);
1773 
1774   __ bind(is_null);
1775   __ block_comment("} gen_pin_object");
1776 }
1777 
1778 // Unpin object
1779 static void gen_unpin_object(MacroAssembler* masm,
1780                              VMRegPair reg) {
1781   __ block_comment("gen_unpin_object {");
1782   Label is_null;
1783 
1784   if (reg.first()->is_stack()) {
1785     __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1786   } else if (reg.first()->as_Register() != c_rarg1) {
1787     __ movptr(c_rarg1, reg.first()->as_Register());
1788   }
1789 
1790   __ testptr(c_rarg1, c_rarg1);
1791   __ jccb(Assembler::equal, is_null);
1792 
1793   __ call_VM_leaf(
1794     CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1795     r15_thread, c_rarg1);
1796 
1797   __ bind(is_null);
1798   __ block_comment("} gen_unpin_object");
1799 }
1800 
1801 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1802 // keeps a new JNI critical region from starting until a GC has been
1803 // forced.  Save down any oops in registers and describe them in an
1804 // OopMap.
1805 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1806                                                int stack_slots,
1807                                                int total_c_args,
1808                                                int total_in_args,
1809                                                int arg_save_area,
1810                                                OopMapSet* oop_maps,
1811                                                VMRegPair* in_regs,
1812                                                BasicType* in_sig_bt) {
1813   __ block_comment("check GCLocker::needs_gc");
1814   Label cont;
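  // GCLocker::needs_gc_address() points at a boolean flag; when no GC is
  // pending we fall straight through to the native call.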
1815   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1816   __ jcc(Assembler::equal, cont);
1817 
1818   // Save down any incoming oops and call into the runtime to halt for a GC
1819 
1820   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1821   save_or_restore_arguments(masm, stack_slots, total_in_args,
1822                             arg_save_area, map, in_regs, in_sig_bt);
1823 
1824   address the_pc = __ pc();
1825   oop_maps->add_gc_map( __ offset(), map);
1826   __ set_last_Java_frame(rsp, noreg, the_pc);
1827 
1828   __ block_comment("block_for_jni_critical");
1829   __ movptr(c_rarg0, r15_thread);
1830   __ mov(r12, rsp); // remember sp
1831   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1832   __ andptr(rsp, -16); // align stack as required by ABI
1833   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1834   __ mov(rsp, r12); // restore sp
1835   __ reinit_heapbase();
1836 
1837   __ reset_last_Java_frame(false);
1838 
1839   save_or_restore_arguments(masm, stack_slots, total_in_args,
1840                             arg_save_area, NULL, in_regs, in_sig_bt);
1841   __ bind(cont);
1842 #ifdef ASSERT
1843   if (StressCriticalJNINatives) {
1844     // Stress register saving
1845     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1846     save_or_restore_arguments(masm, stack_slots, total_in_args,
1847                               arg_save_area, map, in_regs, in_sig_bt);
1848     // Destroy argument registers
1849     for (int i = 0; i < total_in_args - 1; i++) {
1850       if (in_regs[i].first()->is_Register()) {
1851         const Register reg = in_regs[i].first()->as_Register();
1852         __ xorptr(reg, reg);
1853       } else if (in_regs[i].first()->is_XMMRegister()) {
1854         __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1855       } else if (in_regs[i].first()->is_FloatRegister()) {
1856         ShouldNotReachHere();
1857       } else if (in_regs[i].first()->is_stack()) {
1858         // Nothing to do
1859       } else {
1860         ShouldNotReachHere();
1861       }
1862       if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1863         i++;
1864       }
1865     }
1866 
1867     save_or_restore_arguments(masm, stack_slots, total_in_args,
1868                               arg_save_area, NULL, in_regs, in_sig_bt);
1869   }
1870 #endif
1871 }
1872 
1873 // Unpack an array argument into a pointer to the body and the length
1874 // if the array is non-null, otherwise pass 0 for both.
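// e.g. a jintArray argument becomes a (jint length, jint* body) pair in the
// C signature of the critical native.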
1875 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1876   Register tmp_reg = rax;
1877   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1878          "possible collision");
1879   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1880          "possible collision");
1881 
1882   __ block_comment("unpack_array_argument {");
1883 
1884   // Pass the length, ptr pair
1885   Label is_null, done;
1886   VMRegPair tmp;
1887   tmp.set_ptr(tmp_reg->as_VMReg());
1888   if (reg.first()->is_stack()) {
1889     // Load the arg up from the stack
1890     move_ptr(masm, reg, tmp);
1891     reg = tmp;
1892   }
1893   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1894   __ jccb(Assembler::equal, is_null);
1895   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1896   move_ptr(masm, tmp, body_arg);
1897   // load the length relative to the body.
1898   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1899                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1900   move32_64(masm, tmp, length_arg);
1901   __ jmpb(done);
1902   __ bind(is_null);
1903   // Pass zeros
1904   __ xorptr(tmp_reg, tmp_reg);
1905   move_ptr(masm, tmp, body_arg);
1906   move32_64(masm, tmp, length_arg);
1907   __ bind(done);
1908 
1909   __ block_comment("} unpack_array_argument");
1910 }
1911 
1912 
1913 // Different signatures may require very different orders for the move
1914 // to avoid clobbering other arguments.  There's no simple way to
1915 // order them safely.  Compute a safe order for issuing stores and
1916 // break any cycles in those stores.  This code is fairly general but
1917 // it's not necessary on the other platforms so we keep it in the
1918 // platform dependent code instead of moving it into a shared file.
1919 // (See bugs 7013347 & 7145024.)
1920 // Note that this code is specific to LP64.
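// For example, the moves rdi -> rsi and rsi -> rdi form a cycle; it is broken
// by first saving one source in the temp register (see break_cycle below).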
1921 class ComputeMoveOrder: public StackObj {
1922   class MoveOperation: public ResourceObj {
1923     friend class ComputeMoveOrder;
1924    private:
1925     VMRegPair        _src;
1926     VMRegPair        _dst;
1927     int              _src_index;
1928     int              _dst_index;
1929     bool             _processed;
1930     MoveOperation*  _next;
1931     MoveOperation*  _prev;
1932 
1933     static int get_id(VMRegPair r) {
1934       return r.first()->value();
1935     }
1936 
1937    public:
1938     MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1939       _src(src)
1940     , _dst(dst)
1941     , _src_index(src_index)
1942     , _dst_index(dst_index)
1943     , _processed(false)
1944     , _next(NULL)
1945     , _prev(NULL) {
1946     }
1947 
1948     VMRegPair src() const              { return _src; }
1949     int src_id() const                 { return get_id(src()); }
1950     int src_index() const              { return _src_index; }
1951     VMRegPair dst() const              { return _dst; }
    void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1953     int dst_index() const              { return _dst_index; }
1954     int dst_id() const                 { return get_id(dst()); }
1955     MoveOperation* next() const       { return _next; }
1956     MoveOperation* prev() const       { return _prev; }
1957     void set_processed()               { _processed = true; }
1958     bool is_processed() const          { return _processed; }
1959 
1960     // insert
1961     void break_cycle(VMRegPair temp_register) {
1962       // create a new store following the last store
1963       // to move from the temp_register to the original
1964       MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1965 
1966       // break the cycle of links and insert new_store at the end
1967       // break the reverse link.
1968       MoveOperation* p = prev();
1969       assert(p->next() == this, "must be");
1970       _prev = NULL;
1971       p->_next = new_store;
1972       new_store->_prev = p;
1973 
      // change the original store to save its value in the temp.
1975       set_dst(-1, temp_register);
1976     }
1977 
1978     void link(GrowableArray<MoveOperation*>& killer) {
      // link this store in front of the store that it depends on
1980       MoveOperation* n = killer.at_grow(src_id(), NULL);
1981       if (n != NULL) {
1982         assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1983         _next = n;
1984         n->_prev = this;
1985       }
1986     }
1987   };
1988 
1989  private:
1990   GrowableArray<MoveOperation*> edges;
1991 
1992  public:
1993   ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1994                     BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1995     // Move operations where the dest is the stack can all be
1996     // scheduled first since they can't interfere with the other moves.
1997     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1998       if (in_sig_bt[i] == T_ARRAY) {
1999         c_arg--;
2000         if (out_regs[c_arg].first()->is_stack() &&
2001             out_regs[c_arg + 1].first()->is_stack()) {
2002           arg_order.push(i);
2003           arg_order.push(c_arg);
2004         } else {
2005           if (out_regs[c_arg].first()->is_stack() ||
2006               in_regs[i].first() == out_regs[c_arg].first()) {
2007             add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
2008           } else {
2009             add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
2010           }
2011         }
2012       } else if (in_sig_bt[i] == T_VOID) {
2013         arg_order.push(i);
2014         arg_order.push(c_arg);
2015       } else {
2016         if (out_regs[c_arg].first()->is_stack() ||
2017             in_regs[i].first() == out_regs[c_arg].first()) {
2018           arg_order.push(i);
2019           arg_order.push(c_arg);
2020         } else {
2021           add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
2022         }
2023       }
2024     }
    // Break any cycles in the register moves and emit them in the
    // proper order.
2027     GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
2028     for (int i = 0; i < stores->length(); i++) {
2029       arg_order.push(stores->at(i)->src_index());
2030       arg_order.push(stores->at(i)->dst_index());
2031     }
2032  }
2033 
  // Collect all the move operations
2035   void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
2036     if (src.first() == dst.first()) return;
2037     edges.append(new MoveOperation(src_index, src, dst_index, dst));
2038   }
2039 
2040   // Walk the edges breaking cycles between moves.  The result list
2041   // can be walked in order to produce the proper set of loads
2042   GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
2043     // Record which moves kill which values
2044     GrowableArray<MoveOperation*> killer;
2045     for (int i = 0; i < edges.length(); i++) {
2046       MoveOperation* s = edges.at(i);
2047       assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
2048       killer.at_put_grow(s->dst_id(), s, NULL);
2049     }
2050     assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
2051            "make sure temp isn't in the registers that are killed");
2052 
2053     // create links between loads and stores
2054     for (int i = 0; i < edges.length(); i++) {
2055       edges.at(i)->link(killer);
2056     }
2057 
2058     // at this point, all the move operations are chained together
2059     // in a doubly linked list.  Processing it backwards finds
2060     // the beginning of the chain, forwards finds the end.  If there's
    // a cycle it can be broken at any point, so pick an edge and walk
2062     // backward until the list ends or we end where we started.
2063     GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
2064     for (int e = 0; e < edges.length(); e++) {
2065       MoveOperation* s = edges.at(e);
2066       if (!s->is_processed()) {
2067         MoveOperation* start = s;
2068         // search for the beginning of the chain or cycle
2069         while (start->prev() != NULL && start->prev() != s) {
2070           start = start->prev();
2071         }
2072         if (start->prev() == s) {
2073           start->break_cycle(temp_register);
2074         }
2075         // walk the chain forward inserting to store list
2076         while (start != NULL) {
2077           stores->append(start);
2078           start->set_processed();
2079           start = start->next();
2080         }
2081       }
2082     }
2083     return stores;
2084   }
2085 };
2086 
2087 static void verify_oop_args(MacroAssembler* masm,
2088                             const methodHandle& method,
2089                             const BasicType* sig_bt,
2090                             const VMRegPair* regs) {
2091   Register temp_reg = rbx;  // not part of any compiled calling seq
2092   if (VerifyOops) {
2093     for (int i = 0; i < method->size_of_parameters(); i++) {
2094       if (sig_bt[i] == T_OBJECT ||
2095           sig_bt[i] == T_ARRAY) {
2096         VMReg r = regs[i].first();
2097         assert(r->is_valid(), "bad oop arg");
2098         if (r->is_stack()) {
2099           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2100           __ verify_oop(temp_reg);
2101         } else {
2102           __ verify_oop(r->as_Register());
2103         }
2104       }
2105     }
2106   }
2107 }
2108 
2109 static void gen_special_dispatch(MacroAssembler* masm,
2110                                  const methodHandle& method,
2111                                  const BasicType* sig_bt,
2112                                  const VMRegPair* regs) {
2113   verify_oop_args(masm, method, sig_bt, regs);
2114   vmIntrinsics::ID iid = method->intrinsic_id();
2115 
2116   // Now write the args into the outgoing interpreter space
2117   bool     has_receiver   = false;
2118   Register receiver_reg   = noreg;
2119   int      member_arg_pos = -1;
2120   Register member_reg     = noreg;
2121   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
2122   if (ref_kind != 0) {
2123     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
2124     member_reg = rbx;  // known to be free at this point
2125     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
2126   } else if (iid == vmIntrinsics::_invokeBasic) {
2127     has_receiver = true;
2128   } else {
2129     fatal("unexpected intrinsic id %d", iid);
2130   }
2131 
2132   if (member_reg != noreg) {
2133     // Load the member_arg into register, if necessary.
2134     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
2135     VMReg r = regs[member_arg_pos].first();
2136     if (r->is_stack()) {
2137       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2138     } else {
2139       // no data motion is needed
2140       member_reg = r->as_Register();
2141     }
2142   }
2143 
2144   if (has_receiver) {
2145     // Make sure the receiver is loaded into a register.
2146     assert(method->size_of_parameters() > 0, "oob");
2147     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
2148     VMReg r = regs[0].first();
2149     assert(r->is_valid(), "bad receiver arg");
2150     if (r->is_stack()) {
2151       // Porting note:  This assumes that compiled calling conventions always
2152       // pass the receiver oop in a register.  If this is not true on some
2153       // platform, pick a temp and load the receiver from stack.
2154       fatal("receiver always in a register");
2155       receiver_reg = j_rarg0;  // known to be free at this point
2156       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2157     } else {
2158       // no data motion is needed
2159       receiver_reg = r->as_Register();
2160     }
2161   }
2162 
2163   // Figure out which address we are really jumping to:
2164   MethodHandles::generate_method_handle_dispatch(masm, iid,
2165                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
2166 }
2167 
2168 // ---------------------------------------------------------------------------
2169 // Generate a native wrapper for a given method.  The method takes arguments
2170 // in the Java compiled code convention, marshals them to the native
2171 // convention (handlizes oops, etc), transitions to native, makes the call,
2172 // returns to java state (possibly blocking), unhandlizes any result and
2173 // returns.
2174 //
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, because it's impossible
// for them to be thrown.
2184 //
2185 // They are roughly structured like this:
2186 //    if (GCLocker::needs_gc())
2187 //      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
2190 //    check for safepoint in progress
2191 //    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
2194 //    transition back to thread_in_Java
2195 //    return to caller
2196 //
2197 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
2198                                                 const methodHandle& method,
2199                                                 int compile_id,
2200                                                 BasicType* in_sig_bt,
2201                                                 VMRegPair* in_regs,
2202                                                 BasicType ret_type) {
2203   if (method->is_method_handle_intrinsic()) {
2204     vmIntrinsics::ID iid = method->intrinsic_id();
2205     intptr_t start = (intptr_t)__ pc();
2206     int vep_offset = ((intptr_t)__ pc()) - start;
2207     gen_special_dispatch(masm,
2208                          method,
2209                          in_sig_bt,
2210                          in_regs);
2211     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
2212     __ flush();
2213     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
2214     return nmethod::new_native_nmethod(method,
2215                                        compile_id,
2216                                        masm->code(),
2217                                        vep_offset,
2218                                        frame_complete,
2219                                        stack_slots / VMRegImpl::slots_per_word,
2220                                        in_ByteSize(-1),
2221                                        in_ByteSize(-1),
2222                                        (OopMapSet*)NULL);
2223   }
2224   bool is_critical_native = true;
2225   address native_func = method->critical_native_function();
2226   if (native_func == NULL) {
2227     native_func = method->native_function();
2228     is_critical_native = false;
2229   }
2230   assert(native_func != NULL, "must have function");
2231 
2232   // An OopMap for lock (and class if static)
2233   OopMapSet *oop_maps = new OopMapSet();
2234   intptr_t start = (intptr_t)__ pc();
2235 
  // We have received a description of where all the java args are located
2237   // on entry to the wrapper. We need to convert these args to where
2238   // the jni function will expect them. To figure out where they go
2239   // we convert the java signature to a C signature by inserting
2240   // the hidden arguments as arg[0] and possibly arg[1] (static method)
2241 
2242   const int total_in_args = method->size_of_parameters();
2243   int total_c_args = total_in_args;
2244   if (!is_critical_native) {
2245     total_c_args += 1;
2246     if (method->is_static()) {
2247       total_c_args++;
2248     }
2249   } else {
2250     for (int i = 0; i < total_in_args; i++) {
2251       if (in_sig_bt[i] == T_ARRAY) {
2252         total_c_args++;
2253       }
2254     }
2255   }
2256 
2257   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2258   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2259   BasicType* in_elem_bt = NULL;
2260 
2261   int argc = 0;
2262   if (!is_critical_native) {
2263     out_sig_bt[argc++] = T_ADDRESS;
2264     if (method->is_static()) {
2265       out_sig_bt[argc++] = T_OBJECT;
2266     }
2267 
2268     for (int i = 0; i < total_in_args ; i++ ) {
2269       out_sig_bt[argc++] = in_sig_bt[i];
2270     }
2271   } else {
2272     Thread* THREAD = Thread::current();
2273     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2274     SignatureStream ss(method->signature());
2275     for (int i = 0; i < total_in_args ; i++ ) {
2276       if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as an (int length, elem* body) pair
2278         out_sig_bt[argc++] = T_INT;
2279         out_sig_bt[argc++] = T_ADDRESS;
2280         Symbol* atype = ss.as_symbol(CHECK_NULL);
2281         const char* at = atype->as_C_string();
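        // Critical natives only take one-dimensional primitive arrays, so the
        // descriptor is two characters, e.g. "[I" for an int[] argument.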
2282         if (strlen(at) == 2) {
2283           assert(at[0] == '[', "must be");
2284           switch (at[1]) {
2285             case 'B': in_elem_bt[i]  = T_BYTE; break;
2286             case 'C': in_elem_bt[i]  = T_CHAR; break;
2287             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
2288             case 'F': in_elem_bt[i]  = T_FLOAT; break;
2289             case 'I': in_elem_bt[i]  = T_INT; break;
2290             case 'J': in_elem_bt[i]  = T_LONG; break;
2291             case 'S': in_elem_bt[i]  = T_SHORT; break;
2292             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
2293             default: ShouldNotReachHere();
2294           }
2295         }
2296       } else {
2297         out_sig_bt[argc++] = in_sig_bt[i];
2298         in_elem_bt[i] = T_VOID;
2299       }
2300       if (in_sig_bt[i] != T_VOID) {
2301         assert(in_sig_bt[i] == ss.type(), "must match");
2302         ss.next();
2303       }
2304     }
2305   }
2306 
2307   // Now figure out where the args must be stored and how much stack space
2308   // they require.
2309   int out_arg_slots;
2310   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2311 
2312   // Compute framesize for the wrapper.  We need to handlize all oops in
2313   // incoming registers
2314 
2315   // Calculate the total number of stack slots we will need.
2316 
2317   // First count the abi requirement plus all of the outgoing args
2318   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2319 
2320   // Now the space for the inbound oop handle area
2321   int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
2322   if (is_critical_native) {
2323     // Critical natives may have to call out so they need a save area
2324     // for register arguments.
2325     int double_slots = 0;
2326     int single_slots = 0;
2327     for ( int i = 0; i < total_in_args; i++) {
2328       if (in_regs[i].first()->is_Register()) {
2329         const Register reg = in_regs[i].first()->as_Register();
2330         switch (in_sig_bt[i]) {
2331           case T_BOOLEAN:
2332           case T_BYTE:
2333           case T_SHORT:
2334           case T_CHAR:
2335           case T_INT:  single_slots++; break;
2336           case T_ARRAY:  // specific to LP64 (7145024)
2337           case T_LONG: double_slots++; break;
2338           default:  ShouldNotReachHere();
2339         }
2340       } else if (in_regs[i].first()->is_XMMRegister()) {
2341         switch (in_sig_bt[i]) {
2342           case T_FLOAT:  single_slots++; break;
2343           case T_DOUBLE: double_slots++; break;
2344           default:  ShouldNotReachHere();
2345         }
2346       } else if (in_regs[i].first()->is_FloatRegister()) {
2347         ShouldNotReachHere();
2348       }
2349     }
2350     total_save_slots = double_slots * 2 + single_slots;
2351     // align the save area
2352     if (double_slots != 0) {
2353       stack_slots = align_up(stack_slots, 2);
2354     }
2355   }
2356 
2357   int oop_handle_offset = stack_slots;
2358   stack_slots += total_save_slots;
2359 
2360   // Now any space we need for handlizing a klass if static method
2361 
2362   int klass_slot_offset = 0;
2363   int klass_offset = -1;
2364   int lock_slot_offset = 0;
2365   bool is_static = false;
2366 
2367   if (method->is_static()) {
2368     klass_slot_offset = stack_slots;
2369     stack_slots += VMRegImpl::slots_per_word;
2370     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2371     is_static = true;
2372   }
2373 
2374   // Plus a lock if needed
2375 
2376   if (method->is_synchronized()) {
2377     lock_slot_offset = stack_slots;
2378     stack_slots += VMRegImpl::slots_per_word;
2379   }
2380 
2381   // Now a place (+2) to save return values or temp during shuffling
2382   // + 4 for return address (which we own) and saved rbp
2383   stack_slots += 6;
2384 
2385   // Ok The space we have allocated will look like:
2386   //
2387   //
2388   // FP-> |                     |
2389   //      |---------------------|
2390   //      | 2 slots for moves   |
2391   //      |---------------------|
2392   //      | lock box (if sync)  |
2393   //      |---------------------| <- lock_slot_offset
2394   //      | klass (if static)   |
2395   //      |---------------------| <- klass_slot_offset
2396   //      | oopHandle area      |
2397   //      |---------------------| <- oop_handle_offset (6 java arg registers)
2398   //      | outbound memory     |
2399   //      | based arguments     |
2400   //      |                     |
2401   //      |---------------------|
2402   //      |                     |
2403   // SP-> | out_preserved_slots |
2404   //
2405   //
2406 
2407 
  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
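  // e.g. with the usual 16-byte stack alignment, stack_slots is rounded up to
  // a multiple of 4 (stack slots are 4 bytes each).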
2410   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
2411 
2412   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2413 
2414   // First thing make an ic check to see if we should even be here
2415 
2416   // We are free to use all registers as temps without saving them and
2417   // restoring them except rbp. rbp is the only callee save register
2418   // as far as the interpreter and the compiler(s) are concerned.
2419 
2420 
2421   const Register ic_reg = rax;
2422   const Register receiver = j_rarg0;
2423 
2424   Label hit;
2425   Label exception_pending;
2426 
2427   assert_different_registers(ic_reg, receiver, rscratch1);
2428   __ verify_oop(receiver);
2429   __ load_klass(rscratch1, receiver);
2430   __ cmpq(ic_reg, rscratch1);
2431   __ jcc(Assembler::equal, hit);
2432 
2433   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2434 
2435   // Verified entry point must be aligned
2436   __ align(8);
2437 
2438   __ bind(hit);
2439 
2440   int vep_offset = ((intptr_t)__ pc()) - start;
2441 
2442 #ifdef COMPILER1
2443   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2444   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2445     inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2446   }
2447 #endif // COMPILER1
2448 
2449   // The instruction at the verified entry point must be 5 bytes or longer
2450   // because it can be patched on the fly by make_non_entrant. The stack bang
2451   // instruction fits that requirement.
2452 
2453   // Generate stack overflow check
2454 
2455   if (UseStackBanging) {
2456     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2457   } else {
2458     // need a 5 byte instruction to allow MT safe patching to non-entrant
2459     __ fat_nop();
2460   }
2461 
2462   // Generate a new frame for the wrapper.
2463   __ enter();
2464   // -2 because return address is already present and so is saved rbp
2465   __ subptr(rsp, stack_size - 2*wordSize);
2466 
2467   // Frame is now completed as far as size and linkage.
2468   int frame_complete = ((intptr_t)__ pc()) - start;
2469 
2470     if (UseRTMLocking) {
2471       // Abort RTM transaction before calling JNI
2472       // because critical section will be large and will be
2473       // aborted anyway. Also nmethod could be deoptimized.
2474       __ xabort(0);
2475     }
2476 
2477 #ifdef ASSERT
2478     {
2479       Label L;
2480       __ mov(rax, rsp);
2481       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2482       __ cmpptr(rax, rsp);
2483       __ jcc(Assembler::equal, L);
2484       __ stop("improperly aligned stack");
2485       __ bind(L);
2486     }
2487 #endif /* ASSERT */
2488 
2489 
2490   // We use r14 as the oop handle for the receiver/klass
2491   // It is callee save so it survives the call to native
2492 
2493   const Register oop_handle_reg = r14;
2494 
2495   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2496     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2497                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2498   }
2499 
2500   //
2501   // We immediately shuffle the arguments so that any vm call we have to
2502   // make from here on out (sync slow path, jvmti, etc.) we will have
2503   // captured the oops from our caller and have a valid oopMap for
2504   // them.
2505 
2506   // -----------------
2507   // The Grand Shuffle
2508 
  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
2515   //
2516 
2517   // Record esp-based slot for receiver on stack for non-static methods
2518   int receiver_offset = -1;
2519 
  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
2525   //
2526   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2527 
2528   // Mark location of rbp (someday)
2529   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2530 
2531   // Use eax, ebx as temporaries during any memory-memory moves we have to do
2532   // All inbound args are referenced based on rbp and all outbound args via rsp.
2533 
2534 
2535 #ifdef ASSERT
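  // Track which registers the shuffle has already overwritten so we can
  // assert that no argument is read after its register has been clobbered.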
2536   bool reg_destroyed[RegisterImpl::number_of_registers];
2537   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2538   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2539     reg_destroyed[r] = false;
2540   }
2541   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2542     freg_destroyed[f] = false;
2543   }
2544 
2545 #endif /* ASSERT */
2546 
2547   // This may iterate in two different directions depending on the
2548   // kind of native it is.  The reason is that for regular JNI natives
2549   // the incoming and outgoing registers are offset upwards and for
2550   // critical natives they are offset down.
2551   GrowableArray<int> arg_order(2 * total_in_args);
2552   // Inbound arguments that need to be pinned for critical natives
2553   GrowableArray<int> pinned_args(total_in_args);
2554   // Current stack slot for storing register based array argument
2555   int pinned_slot = oop_handle_offset;
2556 
2557   VMRegPair tmp_vmreg;
2558   tmp_vmreg.set2(rbx->as_VMReg());
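  // rbx is not an argument register in either calling convention, so it is
  // safe to use as the cycle-breaking temporary.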
2559 
2560   if (!is_critical_native) {
2561     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2562       arg_order.push(i);
2563       arg_order.push(c_arg);
2564     }
2565   } else {
2566     // Compute a valid move order, using tmp_vmreg to break any cycles
2567     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2568   }
2569 
2570   int temploc = -1;
2571   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2572     int i = arg_order.at(ai);
2573     int c_arg = arg_order.at(ai + 1);
2574     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2575     if (c_arg == -1) {
2576       assert(is_critical_native, "should only be required for critical natives");
2577       // This arg needs to be moved to a temporary
2578       __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2579       in_regs[i] = tmp_vmreg;
2580       temploc = i;
2581       continue;
2582     } else if (i == -1) {
2583       assert(is_critical_native, "should only be required for critical natives");
2584       // Read from the temporary location
2585       assert(temploc != -1, "must be valid");
2586       i = temploc;
2587       temploc = -1;
2588     }
2589 #ifdef ASSERT
2590     if (in_regs[i].first()->is_Register()) {
2591       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2592     } else if (in_regs[i].first()->is_XMMRegister()) {
2593       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2594     }
2595     if (out_regs[c_arg].first()->is_Register()) {
2596       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2597     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2598       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2599     }
2600 #endif /* ASSERT */
2601     switch (in_sig_bt[i]) {
2602       case T_ARRAY:
2603         if (is_critical_native) {
2604           // pin before unpack
2605           if (Universe::heap()->supports_object_pinning()) {
2606             save_args(masm, total_c_args, 0, out_regs);
2607             gen_pin_object(masm, in_regs[i]);
2608             pinned_args.append(i);
2609             restore_args(masm, total_c_args, 0, out_regs);
2610 
2611             // rax has pinned array
2612             VMRegPair result_reg;
2613             result_reg.set_ptr(rax->as_VMReg());
2614             move_ptr(masm, result_reg, in_regs[i]);
2615             if (!in_regs[i].first()->is_stack()) {
2616               assert(pinned_slot <= stack_slots, "overflow");
2617               move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2618               pinned_slot += VMRegImpl::slots_per_word;
2619             }
2620           }
2621           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2622           c_arg++;
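               // A critical-native array argument expands to two C args (length,
               // then body pointer), which is why c_arg is bumped an extra slot here.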
2623 #ifdef ASSERT
2624           if (out_regs[c_arg].first()->is_Register()) {
2625             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2626           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2627             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2628           }
2629 #endif
2630           break;
2631         }
2632       case T_OBJECT:
2633         assert(!is_critical_native, "no oop arguments");
2634         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2635                     ((i == 0) && (!is_static)),
2636                     &receiver_offset);
2637         break;
2638       case T_VOID:
2639         break;
2640 
2641       case T_FLOAT:
2642         float_move(masm, in_regs[i], out_regs[c_arg]);
2643         break;
2644 
2645       case T_DOUBLE:
2646         assert( i + 1 < total_in_args &&
2647                 in_sig_bt[i + 1] == T_VOID &&
2648                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2649         double_move(masm, in_regs[i], out_regs[c_arg]);
2650         break;
2651 
2652       case T_LONG :
2653         long_move(masm, in_regs[i], out_regs[c_arg]);
2654         break;
2655 
2656       case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // fall through
2657 
2658       default:
2659         move32_64(masm, in_regs[i], out_regs[c_arg]);
2660     }
2661   }
2662 
2663   int c_arg;
2664 
2665   // Pre-load a static method's oop into r14.  Used both by locking code and
2666   // the normal JNI call code.
2667   if (!is_critical_native) {
2668     // point c_arg at the first arg that is already loaded in case we
2669     // need to spill before we call out
2670     c_arg = total_c_args - total_in_args;
2671 
2672     if (method->is_static()) {
2673 
2674       //  load oop into a register
2675       __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2676 
2677       // Now handlize the static class mirror; it's known to be not-null.
2678       __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2679       map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2680 
2681       // Now get the handle
2682       __ lea(oop_handle_reg, Address(rsp, klass_offset));
2683       // store the klass handle as second argument
2684       __ movptr(c_rarg1, oop_handle_reg);
2685       // and protect the arg if we must spill
2686       c_arg--;
2687     }
2688   } else {
2689     // For JNI critical methods we need to save all registers in save_args.
2690     c_arg = 0;
2691   }
2692 
2693   // Change state to native (we save the return address in the thread, since it might not
2694   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2695   // points into the right code segment. It does not have to be the correct return pc.
2696   // We use the same pc/oopMap repeatedly when we call out
2697 
2698   intptr_t the_pc = (intptr_t) __ pc();
2699   oop_maps->add_gc_map(the_pc - start, map);
2700 
2701   __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2702 
2703 
2704   // We have all of the arguments set up at this point. We must not touch any argument
2705   // register from here on: if we had to save/restore one, no oopMap would describe it.
2706 
2707   {
2708     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2709     // protect the args we've loaded
2710     save_args(masm, total_c_args, c_arg, out_regs);
2711     __ mov_metadata(c_rarg1, method());
2712     __ call_VM_leaf(
2713       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2714       r15_thread, c_rarg1);
2715     restore_args(masm, total_c_args, c_arg, out_regs);
2716   }
2717 
2718   // RedefineClasses() tracing support for obsolete method entry
2719   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2720     // protect the args we've loaded
2721     save_args(masm, total_c_args, c_arg, out_regs);
2722     __ mov_metadata(c_rarg1, method());
2723     __ call_VM_leaf(
2724       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2725       r15_thread, c_rarg1);
2726     restore_args(masm, total_c_args, c_arg, out_regs);
2727   }
2728 
2729   // Lock a synchronized method
2730 
2731   // Register definitions used by locking and unlocking
2732 
2733   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2734   const Register obj_reg  = rbx;  // Will contain the oop
2735   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2736   const Register old_hdr  = r13;  // value of old header at unlock time
2737 
2738   Label slow_path_lock;
2739   Label lock_done;
2740 
2741   if (method->is_synchronized()) {
2742     assert(!is_critical_native, "unhandled");
2743 
2744 
2745     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2746 
2747     // Get the handle (the 2nd argument)
2748     __ mov(oop_handle_reg, c_rarg1);
2749 
2750     // Get address of the box
2751 
2752     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2753 
2754     // Load the oop from the handle
2755     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2756 
2757     __ resolve(IS_NOT_NULL, obj_reg);
2758     if (UseBiasedLocking) {
2759       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2760     }
2761 
2762     // Load immediate 1 into swap_reg %rax
2763     __ movl(swap_reg, 1);
2764 
2765     // Load (object->mark() | 1) into swap_reg %rax
2766     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2767     if (EnableValhalla && !UseBiasedLocking) {
2768       // The slow-path is_always_locked pattern reuses the biased-lock bit, which never occurs naturally when !UseBiasedLocking, so clear it here
2769       __ andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place);
2770     }
2771 
2772     // Save (object->mark() | 1) into BasicLock's displaced header
2773     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2774 
2775     if (os::is_MP()) {
2776       __ lock();
2777     }
2778 
2779     // src -> dest iff dest == rax else rax <- dest
2780     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2781     __ jcc(Assembler::equal, lock_done);
2782 
2783     // TODO: should this move to the slow-path code area?
2784 
2785     // Test if the oopMark is an obvious stack pointer, i.e.,
2786     //  1) (mark & 3) == 0, and
2787     //  2) rsp <= mark < mark + os::pagesize()
2788     // These 3 tests can be done by evaluating the following
2789     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2790     // assuming both stack pointer and pagesize have their
2791     // least significant 2 bits clear.
2792     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
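         // For example (assuming a 4K page): 3 - 4096 == 0x...F003 as a mask, so
         // the result is zero exactly when (mark - rsp) has its low two bits clear
         // and is less than the page size, i.e. the mark is a nearby stack address.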
2793 
2794     __ subptr(swap_reg, rsp);
2795     __ andptr(swap_reg, 3 - os::vm_page_size());
2796 
2797     // Save the test result; for the recursive case, the result is zero
2798     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2799     __ jcc(Assembler::notEqual, slow_path_lock);
2800 
2801     // Slow path will re-enter here
2802 
2803     __ bind(lock_done);
2804   }
2805 
2806 
2807   // Finally just about ready to make the JNI call
2808 
2809 
2810   // get JNIEnv* which is first argument to native
2811   if (!is_critical_native) {
2812     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2813   }
2814 
2815   // Now set thread in native
2816   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2817 
2818   __ call(RuntimeAddress(native_func));
2819 
2820   // Verify or restore cpu control state after JNI call
2821   __ restore_cpu_control_state_after_jni();
2822 
2823   // Unpack native results.
2824   switch (ret_type) {
2825   case T_BOOLEAN: __ c2bool(rax);            break;
2826   case T_CHAR   : __ movzwl(rax, rax);      break;
2827   case T_BYTE   : __ sign_extend_byte (rax); break;
2828   case T_SHORT  : __ sign_extend_short(rax); break;
2829   case T_INT    : /* nothing to do */        break;
2830   case T_DOUBLE :
2831   case T_FLOAT  :
2832     // Result is in xmm0; we'll save it as needed
2833     break;
2834   case T_ARRAY:                 // Really a handle
2835   case T_OBJECT:                // Really a handle
2836       break; // can't de-handlize until after safepoint check
2837   case T_VOID: break;
2838   case T_LONG: break;
2839   default       : ShouldNotReachHere();
2840   }
2841 
2842   // unpin pinned arguments
2843   pinned_slot = oop_handle_offset;
2844   if (pinned_args.length() > 0) {
2845     // save return value that may be overwritten otherwise.
2846     save_native_result(masm, ret_type, stack_slots);
2847     for (int index = 0; index < pinned_args.length(); index ++) {
2848       int i = pinned_args.at(index);
2849       assert(pinned_slot <= stack_slots, "overflow");
2850       if (!in_regs[i].first()->is_stack()) {
2851         int offset = pinned_slot * VMRegImpl::stack_slot_size;
2852         __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2853         pinned_slot += VMRegImpl::slots_per_word;
2854       }
2855       gen_unpin_object(masm, in_regs[i]);
2856     }
2857     restore_native_result(masm, ret_type, stack_slots);
2858   }
2859 
2860   // Switch thread to "native transition" state before reading the synchronization state.
2861   // This additional state is necessary because reading and testing the synchronization
2862   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2863   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2864   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2865   //     Thread A is resumed to finish this native method, but doesn't block here since it
2866   //     didn't see any synchronization in progress, and escapes.
2867   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2868 
2869   if (os::is_MP()) {
2870     if (UseMembar) {
2871       // Force this write out before the read below
2872       __ membar(Assembler::Membar_mask_bits(
2873            Assembler::LoadLoad | Assembler::LoadStore |
2874            Assembler::StoreLoad | Assembler::StoreStore));
2875     } else {
2876       // Write serialization page so VM thread can do a pseudo remote membar.
2877       // We use the current thread pointer to calculate a thread specific
2878       // offset to write to within the page. This minimizes bus traffic
2879       // due to cache line collision.
2880       __ serialize_memory(r15_thread, rcx);
2881     }
2882   }
2883 
2884   Label after_transition;
2885 
2886   // check for safepoint operation in progress and/or pending suspend requests
2887   {
2888     Label Continue;
2889     Label slow_path;
2890 
2891     __ safepoint_poll(slow_path, r15_thread, rscratch1);
2892 
2893     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2894     __ jcc(Assembler::equal, Continue);
2895     __ bind(slow_path);
2896 
2897     // Don't use call_VM, as it will see a possible pending exception and forward it,
2898     // never returning here and preventing us from clearing _last_native_pc down below.
2899     // We also can't use call_VM_leaf, as it will check whether rsi & rdi are
2900     // preserved and correspond to the bcp/locals pointers. So we do the runtime call
2901     // by hand.
2902     //
2903     __ vzeroupper();
2904     save_native_result(masm, ret_type, stack_slots);
2905     __ mov(c_rarg0, r15_thread);
2906     __ mov(r12, rsp); // remember sp
2907     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2908     __ andptr(rsp, -16); // align stack as required by ABI
2909     if (!is_critical_native) {
2910       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2911     } else {
2912       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2913     }
2914     __ mov(rsp, r12); // restore sp
2915     __ reinit_heapbase();
2916     // Restore any method result value
2917     restore_native_result(masm, ret_type, stack_slots);
2918 
2919     if (is_critical_native) {
2920       // The call above performed the transition to thread_in_Java so
2921       // skip the transition logic below.
2922       __ jmpb(after_transition);
2923     }
2924 
2925     __ bind(Continue);
2926   }
2927 
2928   // change thread state
2929   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2930   __ bind(after_transition);
2931 
2932   Label reguard;
2933   Label reguard_done;
2934   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2935   __ jcc(Assembler::equal, reguard);
2936   __ bind(reguard_done);
2937 
2938   // The native result, if any, is still live
2939 
2940   // Unlock
2941   Label unlock_done;
2942   Label slow_path_unlock;
2943   if (method->is_synchronized()) {
2944 
2945     // Get locked oop from the handle we passed to jni
2946     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2947     __ resolve(IS_NOT_NULL, obj_reg);
2948 
2949     Label done;
2950 
2951     if (UseBiasedLocking) {
2952       __ biased_locking_exit(obj_reg, old_hdr, done);
2953     }
2954 
2955     // Simple recursive lock?
2956 
2957     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2958     __ jcc(Assembler::equal, done);
2959 
2960     // Must save rax if it is live now, because cmpxchg must use it
2961     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2962       save_native_result(masm, ret_type, stack_slots);
2963     }
2964 
2965 
2966     // get address of the stack lock
2967     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2968     // get old displaced header
2969     __ movptr(old_hdr, Address(rax, 0));
2970 
2971     // Atomic swap old header if oop still contains the stack lock
2972     if (os::is_MP()) {
2973       __ lock();
2974     }
2975     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2976     __ jcc(Assembler::notEqual, slow_path_unlock);
2977 
2978     // slow path re-enters here
2979     __ bind(unlock_done);
2980     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2981       restore_native_result(masm, ret_type, stack_slots);
2982     }
2983 
2984     __ bind(done);
2985 
2986   }
2987   {
2988     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2989     save_native_result(masm, ret_type, stack_slots);
2990     __ mov_metadata(c_rarg1, method());
2991     __ call_VM_leaf(
2992          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2993          r15_thread, c_rarg1);
2994     restore_native_result(masm, ret_type, stack_slots);
2995   }
2996 
2997   __ reset_last_Java_frame(false);
2998 
2999   // Unbox oop result, e.g. JNIHandles::resolve value.
3000   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
3001     __ resolve_jobject(rax /* value */,
3002                        r15_thread /* thread */,
3003                        rcx /* tmp */);
3004   }
3005 
3006   if (CheckJNICalls) {
3007     // clear_pending_jni_exception_check
3008     __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
3009   }
3010 
3011   if (!is_critical_native) {
3012     // reset handle block
3013     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
3014     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
3015   }
3016 
3017   // pop our frame
3018 
3019   __ leave();
3020 
3021   if (!is_critical_native) {
3022     // Any exception pending?
3023     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3024     __ jcc(Assembler::notEqual, exception_pending);
3025   }
3026 
3027   // Return
3028 
3029   __ ret(0);
3030 
3031   // Unexpected paths are out of line and go here
3032 
3033   if (!is_critical_native) {
3034     // forward the exception
3035     __ bind(exception_pending);
3036 
3037     // and forward the exception
3038     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3039   }
3040 
3041   // Slow path locking & unlocking
3042   if (method->is_synchronized()) {
3043 
3044     // BEGIN Slow path lock
3045     __ bind(slow_path_lock);
3046 
3047     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
3048     // args are (oop obj, BasicLock* lock, JavaThread* thread)
3049 
3050     // protect the args we've loaded
3051     save_args(masm, total_c_args, c_arg, out_regs);
3052 
3053     __ mov(c_rarg0, obj_reg);
3054     __ mov(c_rarg1, lock_reg);
3055     __ mov(c_rarg2, r15_thread);
3056 
3057     // Not a leaf but we have last_Java_frame setup as we want
3058     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
3059     restore_args(masm, total_c_args, c_arg, out_regs);
3060 
3061 #ifdef ASSERT
3062     { Label L;
3063       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3064       __ jcc(Assembler::equal, L);
3065       __ stop("no pending exception allowed on exit from monitorenter");
3066       __ bind(L);
3067     }
3068 #endif
3069     __ jmp(lock_done);
3070 
3071     // END Slow path lock
3072 
3073     // BEGIN Slow path unlock
3074     __ bind(slow_path_unlock);
3075 
3076     // If we haven't already saved the native result we must save it now as xmm registers
3077     // are still exposed.
3078     __ vzeroupper();
3079     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
3080       save_native_result(masm, ret_type, stack_slots);
3081     }
3082 
3083     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
3084 
3085     __ mov(c_rarg0, obj_reg);
3086     __ mov(c_rarg2, r15_thread);
3087     __ mov(r12, rsp); // remember sp
3088     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3089     __ andptr(rsp, -16); // align stack as required by ABI
3090 
3091     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
3092     // NOTE that obj_reg == rbx currently
3093     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
3094     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
3095 
3096     // args are (oop obj, BasicLock* lock, JavaThread* thread)
3097     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
3098     __ mov(rsp, r12); // restore sp
3099     __ reinit_heapbase();
3100 #ifdef ASSERT
3101     {
3102       Label L;
3103       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
3104       __ jcc(Assembler::equal, L);
3105       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
3106       __ bind(L);
3107     }
3108 #endif /* ASSERT */
3109 
3110     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
3111 
3112     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
3113       restore_native_result(masm, ret_type, stack_slots);
3114     }
3115     __ jmp(unlock_done);
3116 
3117     // END Slow path unlock
3118 
3119   } // synchronized
3120 
3121   // SLOW PATH Reguard the stack if needed
3122 
3123   __ bind(reguard);
3124   __ vzeroupper();
3125   save_native_result(masm, ret_type, stack_slots);
3126   __ mov(r12, rsp); // remember sp
3127   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3128   __ andptr(rsp, -16); // align stack as required by ABI
3129   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
3130   __ mov(rsp, r12); // restore sp
3131   __ reinit_heapbase();
3132   restore_native_result(masm, ret_type, stack_slots);
3133   // and continue
3134   __ jmp(reguard_done);
3135 
3136 
3137 
3138   __ flush();
3139 
3140   nmethod *nm = nmethod::new_native_nmethod(method,
3141                                             compile_id,
3142                                             masm->code(),
3143                                             vep_offset,
3144                                             frame_complete,
3145                                             stack_slots / VMRegImpl::slots_per_word,
3146                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
3147                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
3148                                             oop_maps);
3149 
3150   if (is_critical_native) {
3151     nm->set_lazy_critical_native(true);
3152   }
3153 
3154   return nm;
3155 
3156 }
3157 
3158 // This function returns the adjustment size (in number of words) to a c2i adapter
3159 // activation for use during deoptimization
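     // For example (illustrative values only): a callee with 2 parameters and 5
     // locals needs (5 - 2) * Interpreter::stackElementWords extra words, since the
     // caller's outgoing area only covers the parameter part of the callee's locals.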
3160 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3161   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3162 }
3163 
3164 
3165 uint SharedRuntime::out_preserve_stack_slots() {
3166   return 0;
3167 }
3168 
3169 //------------------------------generate_deopt_blob----------------------------
3170 void SharedRuntime::generate_deopt_blob() {
3171   // Allocate space for the code
3172   ResourceMark rm;
3173   // Setup code generation tools
3174   int pad = 0;
3175 #if INCLUDE_JVMCI
3176   if (EnableJVMCI || UseAOT) {
3177     pad += 512; // Increase the buffer size when compiling for JVMCI
3178   }
3179 #endif
3180   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
3181   MacroAssembler* masm = new MacroAssembler(&buffer);
3182   int frame_size_in_words;
3183   OopMap* map = NULL;
3184   OopMapSet *oop_maps = new OopMapSet();
3185 
3186   // -------------
3187   // This code enters when returning to a de-optimized nmethod.  A return
3188   // address has been pushed on the stack, and return values are in
3189   // registers.
3190   // If we are doing a normal deopt then we were called from the patched
3191   // nmethod from the point we returned to the nmethod. So the return
3192   // address on the stack is wrong by NativeCall::instruction_size
3193   // We will adjust the value so it looks like we have the original return
3194   // address on the stack (like when we eagerly deoptimized).
3195   // In the case of an exception pending when deoptimizing, we enter
3196   // with a return address on the stack that points after the call we patched
3197   // into the exception handler. We have the following register state from,
3198   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3199   //    rax: exception oop
3200   //    rbx: exception handler
3201   //    rdx: throwing pc
3202   // So in this case we simply jam rdx into the useless return address and
3203   // the stack looks just like we want.
3204   //
3205   // At this point we need to de-opt.  We save the argument return
3206   // registers.  We call the first C routine, fetch_unroll_info().  This
3207   // routine captures the return values and returns a structure which
3208   // describes the current frame size and the sizes of all replacement frames.
3209   // The current frame is compiled code and may contain many inlined
3210   // functions, each with their own JVM state.  We pop the current frame, then
3211   // push all the new frames.  Then we call the C routine unpack_frames() to
3212   // populate these frames.  Finally unpack_frames() returns us the new target
3213   // address.  Notice that callee-save registers are BLOWN here; they have
3214   // already been captured in the vframeArray at the time the return PC was
3215   // patched.
3216   address start = __ pc();
3217   Label cont;
3218 
3219   // Prolog for non exception case!
3220 
3221   // Save everything in sight.
3222   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3223 
3224   // Normal deoptimization.  Save exec mode for unpack_frames.
3225   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3226   __ jmp(cont);
3227 
3228   int reexecute_offset = __ pc() - start;
3229 #if INCLUDE_JVMCI && !defined(COMPILER1)
3230   if (EnableJVMCI && UseJVMCICompiler) {
3231     // JVMCI does not use this kind of deoptimization
3232     __ should_not_reach_here();
3233   }
3234 #endif
3235 
3236   // Reexecute case
3237   // The return address is the pc that describes which bci to re-execute at
3238 
3239   // No need to update map as each call to save_live_registers will produce identical oopmap
3240   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3241 
3242   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3243   __ jmp(cont);
3244 
3245 #if INCLUDE_JVMCI
3246   Label after_fetch_unroll_info_call;
3247   int implicit_exception_uncommon_trap_offset = 0;
3248   int uncommon_trap_offset = 0;
3249 
3250   if (EnableJVMCI || UseAOT) {
3251     implicit_exception_uncommon_trap_offset = __ pc() - start;
3252 
3253     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
3254     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
3255 
3256     uncommon_trap_offset = __ pc() - start;
3257 
3258     // Save everything in sight.
3259     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3260     // fetch_unroll_info needs to call last_java_frame()
3261     __ set_last_Java_frame(noreg, noreg, NULL);
3262 
3263     __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
3264     __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
3265 
3266     __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
3267     __ mov(c_rarg0, r15_thread);
3268     __ movl(c_rarg2, r14); // exec mode
3269     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3270     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
3271 
3272     __ reset_last_Java_frame(false);
3273 
3274     __ jmp(after_fetch_unroll_info_call);
3275   } // EnableJVMCI
3276 #endif // INCLUDE_JVMCI
3277 
3278   int exception_offset = __ pc() - start;
3279 
3280   // Prolog for exception case
3281 
3282   // all registers are dead at this entry point, except for rax, and
3283   // rdx which contain the exception oop and exception pc
3284   // respectively.  Set them in TLS and fall thru to the
3285   // unpack_with_exception_in_tls entry point.
3286 
3287   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3288   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3289 
3290   int exception_in_tls_offset = __ pc() - start;
3291 
3292   // new implementation because exception oop is now passed in JavaThread
3293 
3294   // Prolog for exception case
3295   // All registers must be preserved because they might be used by LinearScan
3296   // Exception oop and throwing PC are passed in JavaThread
3297   // tos: stack at point of call to method that threw the exception (i.e. only
3298   // args are on the stack, no return address)
3299 
3300   // make room on stack for the return address
3301   // It will be patched later with the throwing pc. The correct value is not
3302   // available now because loading it from memory would destroy registers.
3303   __ push(0);
3304 
3305   // Save everything in sight.
3306   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3307 
3308   // Now it is safe to overwrite any register
3309 
3310   // Deopt during an exception.  Save exec mode for unpack_frames.
3311   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3312 
3313   // load throwing pc from JavaThread and patch it as the return address
3314   // of the current frame. Then clear the field in JavaThread
3315 
3316   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3317   __ movptr(Address(rbp, wordSize), rdx);
3318   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3319 
3320 #ifdef ASSERT
3321   // verify that there is really an exception oop in JavaThread
3322   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3323   __ verify_oop(rax);
3324 
3325   // verify that there is no pending exception
3326   Label no_pending_exception;
3327   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3328   __ testptr(rax, rax);
3329   __ jcc(Assembler::zero, no_pending_exception);
3330   __ stop("must not have pending exception here");
3331   __ bind(no_pending_exception);
3332 #endif
3333 
3334   __ bind(cont);
3335 
3336   // Call C code.  Need thread and this frame, but NOT official VM entry
3337   // crud.  We cannot block on this call, no GC can happen.
3338   //
3339   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3340 
3341   // fetch_unroll_info needs to call last_java_frame().
3342 
3343   __ set_last_Java_frame(noreg, noreg, NULL);
3344 #ifdef ASSERT
3345   { Label L;
3346     __ cmpptr(Address(r15_thread,
3347                       JavaThread::last_Java_fp_offset()),
3348               (int32_t)0);
3349     __ jcc(Assembler::equal, L);
3350     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3351     __ bind(L);
3352   }
3353 #endif // ASSERT
3354   __ mov(c_rarg0, r15_thread);
3355   __ movl(c_rarg1, r14); // exec_mode
3356   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3357 
3358   // Need to have an oopmap that tells fetch_unroll_info where to
3359   // find any register it might need.
3360   oop_maps->add_gc_map(__ pc() - start, map);
3361 
3362   __ reset_last_Java_frame(false);
3363 
3364 #if INCLUDE_JVMCI
3365   if (EnableJVMCI || UseAOT) {
3366     __ bind(after_fetch_unroll_info_call);
3367   }
3368 #endif
3369 
3370   // Load UnrollBlock* into rdi
3371   __ mov(rdi, rax);
3372 
3373   __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
3374   Label noException;
3375   __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
3376   __ jcc(Assembler::notEqual, noException);
3377   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3378   // QQQ this is useless; it was NULLed above
3379   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3380   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3381   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3382 
3383   __ verify_oop(rax);
3384 
3385   // Overwrite the result registers with the exception results.
3386   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3387   // I think this is useless
3388   __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3389 
3390   __ bind(noException);
3391 
3392   // Only register save data is on the stack.
3393   // Now restore the result registers.  Everything else is either dead
3394   // or captured in the vframeArray.
3395   RegisterSaver::restore_result_registers(masm);
3396 
3397   // All of the register save area has been popped off the stack. Only the
3398   // return address remains.
3399 
3400   // Pop all the frames we must move/replace.
3401   //
3402   // Frame picture (youngest to oldest)
3403   // 1: self-frame (no frame link)
3404   // 2: deopting frame  (no frame link)
3405   // 3: caller of deopting frame (could be compiled/interpreted).
3406   //
3407   // Note: by leaving the return address of self-frame on the stack
3408   // and using the size of frame 2 to adjust the stack
3409   // when we are done the return to frame 3 will still be on the stack.
3410 
3411   // Pop deoptimized frame
3412   __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3413   __ addptr(rsp, rcx);
3414 
3415   // rsp should be pointing at the return address to the caller (3)
3416 
3417   // Pick up the initial fp we should save
3418   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3419   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3420 
3421 #ifdef ASSERT
3422   // Compilers generate code that bangs the stack by as much as the
3423   // interpreter would need. So this stack banging should never
3424   // trigger a fault. Verify that it does not on non-product builds.
3425   if (UseStackBanging) {
3426     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3427     __ bang_stack_size(rbx, rcx);
3428   }
3429 #endif
3430 
3431   // Load address of array of frame pcs into rcx
3432   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3433 
3434   // Trash the old pc
3435   __ addptr(rsp, wordSize);
3436 
3437   // Load address of array of frame sizes into rsi
3438   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3439 
3440   // Load counter into rdx
3441   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3442 
3443   // Now adjust the caller's stack to make up for the extra locals
3444   // but record the original sp so that we can save it in the skeletal interpreter
3445   // frame and the stack walking of interpreter_sender will get the unextended sp
3446   // value and not the "real" sp value.
3447 
3448   const Register sender_sp = r8;
3449 
3450   __ mov(sender_sp, rsp);
3451   __ movl(rbx, Address(rdi,
3452                        Deoptimization::UnrollBlock::
3453                        caller_adjustment_offset_in_bytes()));
3454   __ subptr(rsp, rbx);
3455 
3456   // Push interpreter frames in a loop
3457   Label loop;
3458   __ bind(loop);
3459   __ movptr(rbx, Address(rsi, 0));      // Load frame size
3460   __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
3461   __ pushptr(Address(rcx, 0));          // Save return address
3462   __ enter();                           // Save old & set new ebp
3463   __ subptr(rsp, rbx);                  // Prolog
3464   // This value is corrected by layout_activation_impl
3465   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3466   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3467   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
3468   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
3469   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
3470   __ decrementl(rdx);                   // Decrement counter
3471   __ jcc(Assembler::notZero, loop);
3472   __ pushptr(Address(rcx, 0));          // Save final return address
3473 
3474   // Re-push self-frame
3475   __ enter();                           // Save old & set new ebp
3476 
3477   // Allocate a full sized register save area.
3478   // Return address and rbp are in place, so we allocate two fewer words.
3479   __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3480 
3481   // Restore frame locals after moving the frame
3482   __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3483   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3484 
3485   // Call C code.  Need thread but NOT official VM entry
3486   // crud.  We cannot block on this call, no GC can happen.  Call should
3487   // restore return values to their stack-slots with the new SP.
3488   //
3489   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3490 
3491   // Use rbp because the frames look interpreted now
3492   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3493   // Don't need the precise return PC here, just precise enough to point into this code blob.
3494   address the_pc = __ pc();
3495   __ set_last_Java_frame(noreg, rbp, the_pc);
3496 
3497   __ andptr(rsp, -(StackAlignmentInBytes));  // Fix stack alignment as required by ABI
3498   __ mov(c_rarg0, r15_thread);
3499   __ movl(c_rarg1, r14); // second arg: exec_mode
3500   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3501   // Revert SP alignment after call since we're going to do some SP relative addressing below
3502   __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3503 
3504   // Set an oopmap for the call site
3505   // Use the same PC we used for the last java frame
3506   oop_maps->add_gc_map(the_pc - start,
3507                        new OopMap( frame_size_in_words, 0 ));
3508 
3509   // Clear fp AND pc
3510   __ reset_last_Java_frame(true);
3511 
3512   // Collect return values
3513   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3514   __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3515   // I think this is useless (throwing pc?)
3516   __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3517 
3518   // Pop self-frame.
3519   __ leave();                           // Epilog
3520 
3521   // Jump to interpreter
3522   __ ret(0);
3523 
3524   // Make sure all code is generated
3525   masm->flush();
3526 
3527   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3528   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3529 #if INCLUDE_JVMCI
3530   if (EnableJVMCI || UseAOT) {
3531     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3532     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3533   }
3534 #endif
3535 }
3536 
3537 #ifdef COMPILER2
3538 //------------------------------generate_uncommon_trap_blob--------------------
3539 void SharedRuntime::generate_uncommon_trap_blob() {
3540   // Allocate space for the code
3541   ResourceMark rm;
3542   // Setup code generation tools
3543   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3544   MacroAssembler* masm = new MacroAssembler(&buffer);
3545 
3546   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3547 
3548   address start = __ pc();
3549 
3550   if (UseRTMLocking) {
3551     // Abort RTM transaction before possible nmethod deoptimization.
3552     __ xabort(0);
3553   }
3554 
3555   // Push self-frame.  We get here with a return address on the
3556   // stack, so rsp is 8-byte aligned until we allocate our frame.
3557   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3558 
3559   // No callee-saved registers. rbp is assumed implicitly saved
3560   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3561 
3562   // The compiler left unloaded_class_index in j_rarg0; move it to where the
3563   // runtime expects it.
3564   __ movl(c_rarg1, j_rarg0);
3565 
3566   __ set_last_Java_frame(noreg, noreg, NULL);
3567 
3568   // Call C code.  Need thread but NOT official VM entry
3569   // crud.  We cannot block on this call, no GC can happen.  Call should
3570   // capture callee-saved registers as well as return values.
3571   // Thread is in rdi already.
3572   //
3573   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3574 
3575   __ mov(c_rarg0, r15_thread);
3576   __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3577   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3578 
3579   // Set an oopmap for the call site
3580   OopMapSet* oop_maps = new OopMapSet();
3581   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3582 
3583   // location of rbp is known implicitly by the frame sender code
3584 
3585   oop_maps->add_gc_map(__ pc() - start, map);
3586 
3587   __ reset_last_Java_frame(false);
3588 
3589   // Load UnrollBlock* into rdi
3590   __ mov(rdi, rax);
3591 
3592 #ifdef ASSERT
3593   { Label L;
3594     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3595             (int32_t)Deoptimization::Unpack_uncommon_trap);
3596     __ jcc(Assembler::equal, L);
3597     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3598     __ bind(L);
3599   }
3600 #endif
3601 
3602   // Pop all the frames we must move/replace.
3603   //
3604   // Frame picture (youngest to oldest)
3605   // 1: self-frame (no frame link)
3606   // 2: deopting frame  (no frame link)
3607   // 3: caller of deopting frame (could be compiled/interpreted).
3608 
3609   // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
3610   __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3611 
3612   // Pop deoptimized frame (int)
3613   __ movl(rcx, Address(rdi,
3614                        Deoptimization::UnrollBlock::
3615                        size_of_deoptimized_frame_offset_in_bytes()));
3616   __ addptr(rsp, rcx);
3617 
3618   // rsp should be pointing at the return address to the caller (3)
3619 
3620   // Pick up the initial fp we should save
3621   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3622   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3623 
3624 #ifdef ASSERT
3625   // Compilers generate code that bangs the stack by as much as the
3626   // interpreter would need. So this stack banging should never
3627   // trigger a fault. Verify that it does not on non-product builds.
3628   if (UseStackBanging) {
3629     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3630     __ bang_stack_size(rbx, rcx);
3631   }
3632 #endif
3633 
3634   // Load address of array of frame pcs into rcx (address*)
3635   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3636 
3637   // Trash the return pc
3638   __ addptr(rsp, wordSize);
3639 
3640   // Load address of array of frame sizes into rsi (intptr_t*)
3641   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3642 
3643   // Counter
3644   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3645 
3646   // Now adjust the caller's stack to make up for the extra locals but
3647   // record the original sp so that we can save it in the skeletal
3648   // interpreter frame and the stack walking of interpreter_sender
3649   // will get the unextended sp value and not the "real" sp value.
3650 
3651   const Register sender_sp = r8;
3652 
3653   __ mov(sender_sp, rsp);
3654   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3655   __ subptr(rsp, rbx);
3656 
3657   // Push interpreter frames in a loop
3658   Label loop;
3659   __ bind(loop);
3660   __ movptr(rbx, Address(rsi, 0)); // Load frame size
3661   __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
3662   __ pushptr(Address(rcx, 0));     // Save return address
3663   __ enter();                      // Save old & set new rbp
3664   __ subptr(rsp, rbx);             // Prolog
3665   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3666             sender_sp);            // Make it walkable
3667   // This value is corrected by layout_activation_impl
3668   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3669   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
3670   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
3671   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
3672   __ decrementl(rdx);              // Decrement counter
3673   __ jcc(Assembler::notZero, loop);
3674   __ pushptr(Address(rcx, 0));     // Save final return address
3675 
3676   // Re-push self-frame
3677   __ enter();                 // Save old & set new rbp
3678   __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3679                               // Prolog
3680 
3681   // Use rbp because the frames look interpreted now
3682   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3683   // Don't need the precise return PC here, just precise enough to point into this code blob.
3684   address the_pc = __ pc();
3685   __ set_last_Java_frame(noreg, rbp, the_pc);
3686 
3687   // Call C code.  Need thread but NOT official VM entry
3688   // crud.  We cannot block on this call, no GC can happen.  Call should
3689   // restore return values to their stack-slots with the new SP.
3690   // Thread is in rdi already.
3691   //
3692   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3693 
3694   __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3695   __ mov(c_rarg0, r15_thread);
3696   __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3697   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3698 
3699   // Set an oopmap for the call site
3700   // Use the same PC we used for the last java frame
3701   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3702 
3703   // Clear fp AND pc
3704   __ reset_last_Java_frame(true);
3705 
3706   // Pop self-frame.
3707   __ leave();                 // Epilog
3708 
3709   // Jump to interpreter
3710   __ ret(0);
3711 
3712   // Make sure all code is generated
3713   masm->flush();
3714 
3715   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3716                                                  SimpleRuntimeFrame::framesize >> 1);
3717 }
3718 #endif // COMPILER2
3719 
3720 
3721 //------------------------------generate_handler_blob------
3722 //
3723 // Generate a special Compile2Runtime blob that saves all registers,
3724 // and sets up an oopmap.
3725 //
3726 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3727   assert(StubRoutines::forward_exception_entry() != NULL,
3728          "must be generated before");
3729 
3730   ResourceMark rm;
3731   OopMapSet *oop_maps = new OopMapSet();
3732   OopMap* map;
3733 
3734   // Allocate space for the code.  Setup code generation tools.
3735   CodeBuffer buffer("handler_blob", 2048, 1024);
3736   MacroAssembler* masm = new MacroAssembler(&buffer);
3737 
3738   address start   = __ pc();
3739   address call_pc = NULL;
3740   int frame_size_in_words;
3741   bool cause_return = (poll_type == POLL_AT_RETURN);
3742   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3743 
3744   if (UseRTMLocking) {
3745     // Abort RTM transaction before calling runtime
3746     // because critical section will be large and will be
3747     // aborted anyway. Also nmethod could be deoptimized.
3748     __ xabort(0);
3749   }
3750 
3751   // Make room for return address (or push it again)
3752   if (!cause_return) {
3753     __ push(rbx);
3754   }
3755 
3756   // Save registers, fpu state, and flags
3757   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3758 
3759   // The following is basically a call_VM.  However, we need the precise
3760   // address of the call in order to generate an oopmap. Hence, we do all the
3761   // work ourselves.
3762 
3763   __ set_last_Java_frame(noreg, noreg, NULL);
3764 
3765   // The return address must always be correct so that frame constructor never
3766   // sees an invalid pc.
3767 
3768   if (!cause_return) {
3769     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3770     // Additionally, rbx is a callee saved register and we can look at it later to determine
3771     // if someone changed the return address for us!
3772     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3773     __ movptr(Address(rbp, wordSize), rbx);
3774   }
3775 
3776   // Do the call
3777   __ mov(c_rarg0, r15_thread);
3778   __ call(RuntimeAddress(call_ptr));
3779 
3780   // Set an oopmap for the call site.  This oopmap will map all
3781   // oop-registers and debug-info registers as callee-saved.  This
3782   // will allow deoptimization at this safepoint to find all possible
3783   // debug-info recordings, as well as let GC find all oops.
3784 
3785   oop_maps->add_gc_map( __ pc() - start, map);
3786 
3787   Label noException;
3788 
3789   __ reset_last_Java_frame(false);
3790 
3791   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3792   __ jcc(Assembler::equal, noException);
3793 
3794   // Exception pending
3795 
3796   RegisterSaver::restore_live_registers(masm, save_vectors);
3797 
3798   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3799 
3800   // No exception case
3801   __ bind(noException);
3802 
3803   Label no_adjust, bail, no_prefix, not_special;
3804   if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3805     // If our stashed return pc was modified by the runtime we avoid touching it
3806     __ cmpptr(rbx, Address(rbp, wordSize));
3807     __ jccb(Assembler::notEqual, no_adjust);
3808 
3809     // Skip over the poll instruction.
3810     // See NativeInstruction::is_safepoint_poll()
3811     // Possible encodings:
3812     //      85 00       test   %eax,(%rax)
3813     //      85 01       test   %eax,(%rcx)
3814     //      85 02       test   %eax,(%rdx)
3815     //      85 03       test   %eax,(%rbx)
3816     //      85 06       test   %eax,(%rsi)
3817     //      85 07       test   %eax,(%rdi)
3818     //
3819     //   41 85 00       test   %eax,(%r8)
3820     //   41 85 01       test   %eax,(%r9)
3821     //   41 85 02       test   %eax,(%r10)
3822     //   41 85 03       test   %eax,(%r11)
3823     //   41 85 06       test   %eax,(%r14)
3824     //   41 85 07       test   %eax,(%r15)
3825     //
3826     //      85 04 24    test   %eax,(%rsp)
3827     //   41 85 04 24    test   %eax,(%r12)
3828     //      85 45 00    test   %eax,0x0(%rbp)
3829     //   41 85 45 00    test   %eax,0x0(%r13)
3830 
3831     __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3832     __ jcc(Assembler::notEqual, no_prefix);
3833     __ addptr(rbx, 1);
3834     __ bind(no_prefix);
3835 #ifdef ASSERT
3836     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3837 #endif
3838     // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3839     // r12/rsp 0x04
3840     // r13/rbp 0x05
3841     __ movzbq(rcx, Address(rbx, 1));
3842     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3843     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
3844     __ cmpptr(rcx, 1);
3845     __ jcc(Assembler::above, not_special);
3846     __ addptr(rbx, 1);
3847     __ bind(not_special);
3848 #ifdef ASSERT
3849     // Verify the correct encoding of the poll we're about to skip.
3850     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3851     __ jcc(Assembler::notEqual, bail);
3852     // Mask out the modrm bits
3853     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3854     // rax encodes to 0, so if the bits are nonzero it's incorrect
3855     __ jcc(Assembler::notZero, bail);
3856 #endif
3857     // Adjust return pc forward to step over the safepoint poll instruction
3858     __ addptr(rbx, 2);
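         // e.g. "41 85 06  test %eax,(%r14)": +1 above for the REX prefix,
         // +2 here for the opcode and modrm bytes, landing just past the poll.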
3859     __ movptr(Address(rbp, wordSize), rbx);
3860   }
3861 
3862   __ bind(no_adjust);
3863   // Normal exit, restore registers and exit.
3864   RegisterSaver::restore_live_registers(masm, save_vectors);
3865   __ ret(0);
3866 
3867 #ifdef ASSERT
3868   __ bind(bail);
3869   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3870 #endif
3871 
3872   // Make sure all code is generated
3873   masm->flush();
3874 
3875   // Fill-out other meta info
3876   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3877 }
3878 
3879 //
3880 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3881 //
3882 // Generate a stub that calls into vm to find out the proper destination
3883 // of a java call. All the argument registers are live at this point,
3884 // but since this is generic code we don't know what they are, so the caller
3885 // must do any gc of the args.
3886 //
3887 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3888   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3889 
3890   // allocate space for the code
3891   ResourceMark rm;
3892 
3893   CodeBuffer buffer(name, 1000, 512);
3894   MacroAssembler* masm                = new MacroAssembler(&buffer);
3895 
3896   int frame_size_in_words;
3897 
3898   OopMapSet *oop_maps = new OopMapSet();
3899   OopMap* map = NULL;
3900 
3901   int start = __ offset();
3902 
3903   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3904 
3905   int frame_complete = __ offset();
3906 
3907   __ set_last_Java_frame(noreg, noreg, NULL);
3908 
3909   __ mov(c_rarg0, r15_thread);
3910 
3911   __ call(RuntimeAddress(destination));
3912 
3913 
3914   // Set an oopmap for the call site.
3915   // We need this not only for callee-saved registers, but also for volatile
3916   // registers that the compiler might be keeping live across a safepoint.
3917 
3918   oop_maps->add_gc_map( __ offset() - start, map);
3919 
3920   // rax contains the address we are going to jump to, assuming no exception got installed
3921 
3922   // clear last_Java_sp
3923   __ reset_last_Java_frame(false);
3924   // check for pending exceptions
3925   Label pending;
3926   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3927   __ jcc(Assembler::notEqual, pending);
3928 
3929   // get the returned Method*
3930   __ get_vm_result_2(rbx, r15_thread);
3931   __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3932 
3933   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3934 
3935   RegisterSaver::restore_live_registers(masm);
3936 
3937   // We are back to the original state on entry and ready to go.
3938 
3939   __ jmp(rax);
3940 
3941   // Pending exception after the safepoint
3942 
3943   __ bind(pending);
3944 
3945   RegisterSaver::restore_live_registers(masm);
3946 
3947   // exception pending => remove activation and forward to exception handler
3948 
3949   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3950 
3951   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3952   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3953 
3954   // -------------
3955   // make sure all code is generated
3956   masm->flush();
3957 
3958   // return the blob
3959   // frame size is in words
3960   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3961 }
3962 
3963 
3964 //------------------------------Montgomery multiplication------------------------
3965 //
3966 
3967 #ifndef _WINDOWS
3968 
3969 #define ASM_SUBTRACT
3970 
3971 #ifdef ASM_SUBTRACT
3972 // Subtract 0:b from carry:a.  Return carry.
3973 static unsigned long
3974 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3975   long i = 0, cnt = len;
3976   unsigned long tmp;
3977   asm volatile("clc; "
3978                "0: ; "
3979                "mov (%[b], %[i], 8), %[tmp]; "
3980                "sbb %[tmp], (%[a], %[i], 8); "
3981                "inc %[i]; dec %[cnt]; "
3982                "jne 0b; "
3983                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3984                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3985                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3986                : "memory");
3987   return tmp;
3988 }
3989 #else // ASM_SUBTRACT
3990 typedef int __attribute__((mode(TI))) int128;
3991 
3992 // Subtract 0:b from carry:a.  Return carry.
3993 static unsigned long
3994 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
3995   int128 tmp = 0;
3996   int i;
3997   for (i = 0; i < len; i++) {
3998     tmp += a[i];
3999     tmp -= b[i];
4000     a[i] = tmp;
4001     tmp >>= 64;
4002     assert(-1 <= tmp && tmp <= 0, "invariant");
4003   }
4004   return tmp + carry;
4005 }
#endif // ASM_SUBTRACT
4007 
4008 // Multiply (unsigned) Long A by Long B, accumulating the double-
4009 // length result into the accumulator formed of T0, T1, and T2.
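// In C terms, with p == (unsigned __int128)A * B, MACC performs
//   (T2:T1:T0) += p,
// i.e. T0 += lo64(p), T1 += hi64(p) + carry, T2 += carry.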
4010 #define MACC(A, B, T0, T1, T2)                                  \
4011 do {                                                            \
4012   unsigned long hi, lo;                                         \
4013   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
4014            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
4015            : "r"(A), "a"(B) : "cc");                            \
4016  } while(0)
4017 
4018 // As above, but add twice the double-length result into the
4019 // accumulator.
4020 #define MACC2(A, B, T0, T1, T2)                                 \
4021 do {                                                            \
4022   unsigned long hi, lo;                                         \
4023   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
4024            "add %%rax, %2; adc %%rdx, %3; adc $0, %4"           \
4025            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
4026            : "r"(A), "a"(B) : "cc");                            \
4027  } while(0)
4028 
4029 // Fast Montgomery multiplication.  The derivation of the algorithm is
// in A Cryptographic Library for the Motorola DSP56000,
4031 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
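//
// With R == 2**(64*len) and inv == -n[0]**-1 (mod 2**64), as checked by
// the assert below, the routine computes m == a * b * R**-1 (mod n).
// On each outer iteration the multiple m[i] of n is chosen so that the
// low 64 bits of the accumulator become zero (the t0 == 0 assert),
// which makes the implicit one-word shift (t0 = t1; t1 = t2) exact.
// The trailing loop subtracts n while the result still overflows
// len words.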
4032 
4033 static void __attribute__((noinline))
4034 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
4035                     unsigned long m[], unsigned long inv, int len) {
4036   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4037   int i;
4038 
4039   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4040 
4041   for (i = 0; i < len; i++) {
4042     int j;
4043     for (j = 0; j < i; j++) {
4044       MACC(a[j], b[i-j], t0, t1, t2);
4045       MACC(m[j], n[i-j], t0, t1, t2);
4046     }
4047     MACC(a[i], b[0], t0, t1, t2);
4048     m[i] = t0 * inv;
4049     MACC(m[i], n[0], t0, t1, t2);
4050 
4051     assert(t0 == 0, "broken Montgomery multiply");
4052 
4053     t0 = t1; t1 = t2; t2 = 0;
4054   }
4055 
4056   for (i = len; i < 2*len; i++) {
4057     int j;
4058     for (j = i-len+1; j < len; j++) {
4059       MACC(a[j], b[i-j], t0, t1, t2);
4060       MACC(m[j], n[i-j], t0, t1, t2);
4061     }
4062     m[i-len] = t0;
4063     t0 = t1; t1 = t2; t2 = 0;
4064   }
4065 
4066   while (t0)
4067     t0 = sub(m, n, t0, len);
4068 }
4069 
4070 // Fast Montgomery squaring.  This uses asymptotically 25% fewer
4071 // multiplies so it should be up to 25% faster than Montgomery
4072 // multiplication.  However, its loop control is more complex and it
4073 // may actually run slower on some machines.
4074 
4075 static void __attribute__((noinline))
4076 montgomery_square(unsigned long a[], unsigned long n[],
4077                   unsigned long m[], unsigned long inv, int len) {
4078   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4079   int i;
4080 
4081   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4082 
4083   for (i = 0; i < len; i++) {
4084     int j;
4085     int end = (i+1)/2;
4086     for (j = 0; j < end; j++) {
4087       MACC2(a[j], a[i-j], t0, t1, t2);
4088       MACC(m[j], n[i-j], t0, t1, t2);
4089     }
4090     if ((i & 1) == 0) {
4091       MACC(a[j], a[j], t0, t1, t2);
4092     }
4093     for (; j < i; j++) {
4094       MACC(m[j], n[i-j], t0, t1, t2);
4095     }
4096     m[i] = t0 * inv;
4097     MACC(m[i], n[0], t0, t1, t2);
4098 
4099     assert(t0 == 0, "broken Montgomery square");
4100 
4101     t0 = t1; t1 = t2; t2 = 0;
4102   }
4103 
4104   for (i = len; i < 2*len; i++) {
4105     int start = i-len+1;
4106     int end = start + (len - start)/2;
4107     int j;
4108     for (j = start; j < end; j++) {
4109       MACC2(a[j], a[i-j], t0, t1, t2);
4110       MACC(m[j], n[i-j], t0, t1, t2);
4111     }
4112     if ((i & 1) == 0) {
4113       MACC(a[j], a[j], t0, t1, t2);
4114     }
4115     for (; j < len; j++) {
4116       MACC(m[j], n[i-j], t0, t1, t2);
4117     }
4118     m[i-len] = t0;
4119     t0 = t1; t1 = t2; t2 = 0;
4120   }
4121 
4122   while (t0)
4123     t0 = sub(m, n, t0, len);
4124 }
4125 
4126 // Swap words in a longword.
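// For example, swap(0x0000000100000002UL) == 0x0000000200000001UL.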
4127 static unsigned long swap(unsigned long x) {
4128   return (x << 32) | (x >> 32);
4129 }
4130 
4131 // Copy len longwords from s to d, word-swapping as we go.  The
4132 // destination array is reversed.
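// Combined with swap, this converts between the callers' jint layout
// (BigInteger magnitudes, most-significant word first) and the
// little-endian longword order expected by montgomery_multiply and
// montgomery_square above.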
4133 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
4134   d += len;
4135   while(len-- > 0) {
4136     d--;
4137     *d = swap(*s);
4138     s++;
4139   }
4140 }
4141 
4142 // The threshold at which squaring is advantageous was determined
4143 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
4144 #define MONTGOMERY_SQUARING_THRESHOLD 64
4145 
4146 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
4147                                         jint len, jlong inv,
4148                                         jint *m_ints) {
4149   assert(len % 2 == 0, "array length in montgomery_multiply must be even");
4150   int longwords = len/2;
4151 
  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer, i.e.
  // 256 longwords; the four scratch arrays then use a total of
  // 256 * 8 * 4 == 8192 bytes of stack space.
4155   int total_allocation = longwords * sizeof (unsigned long) * 4;
4156   guarantee(total_allocation <= 8192, "must be");
4157   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4158 
4159   // Local scratch arrays
4160   unsigned long
4161     *a = scratch + 0 * longwords,
4162     *b = scratch + 1 * longwords,
4163     *n = scratch + 2 * longwords,
4164     *m = scratch + 3 * longwords;
4165 
4166   reverse_words((unsigned long *)a_ints, a, longwords);
4167   reverse_words((unsigned long *)b_ints, b, longwords);
4168   reverse_words((unsigned long *)n_ints, n, longwords);
4169 
4170   ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
4171 
4172   reverse_words(m, (unsigned long *)m_ints, longwords);
4173 }
4174 
4175 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
4176                                       jint len, jlong inv,
4177                                       jint *m_ints) {
4178   assert(len % 2 == 0, "array length in montgomery_square must be even");
4179   int longwords = len/2;
4180 
  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer, i.e.
  // 256 longwords; the three scratch arrays then use a total of
  // 256 * 8 * 3 == 6144 bytes of stack space.
4184   int total_allocation = longwords * sizeof (unsigned long) * 3;
4185   guarantee(total_allocation <= 8192, "must be");
4186   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4187 
4188   // Local scratch arrays
4189   unsigned long
4190     *a = scratch + 0 * longwords,
4191     *n = scratch + 1 * longwords,
4192     *m = scratch + 2 * longwords;
4193 
4194   reverse_words((unsigned long *)a_ints, a, longwords);
4195   reverse_words((unsigned long *)n_ints, n, longwords);
4196 
4197   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
4198     ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
4199   } else {
4200     ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
4201   }
4202 
4203   reverse_words(m, (unsigned long *)m_ints, longwords);
4204 }
4205 
#endif // !_WINDOWS
4207 
4208 #ifdef COMPILER2
4209 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
4210 //
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob.
// Compiled code jumps to this blob when an exception is thrown
// (see emit_exception_handler in the x86_64.ad file).
//
// Given an exception pc at a call, we call into the runtime for the
// handler in this method.  If the nmethod has no Java-level handler,
// this handler merely restores state (i.e., callee-saved registers),
// unwinds the frame, and jumps to the exception handler of the
// nmethod's caller.
4221 //
4222 // This code is entered with a jmp.
4223 //
4224 // Arguments:
4225 //   rax: exception oop
4226 //   rdx: exception pc
4227 //
4228 // Results:
4229 //   rax: exception oop
4230 //   rdx: exception pc in caller or ???
4231 //   destination: exception handler of caller
4232 //
4233 // Note: the exception pc MUST be at a call (precise debug information)
4234 //       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
4235 //
4236 
4237 void OptoRuntime::generate_exception_blob() {
4238   assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
4239   assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
4240   assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
4241 
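  // Frame slots are 4-byte jints, so framesize % 4 == 0 means the frame
  // size is a multiple of 16 bytes.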
4242   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
4243 
4244   // Allocate space for the code
4245   ResourceMark rm;
4246   // Setup code generation tools
4247   CodeBuffer buffer("exception_blob", 2048, 1024);
4248   MacroAssembler* masm = new MacroAssembler(&buffer);
4249 
4250 
4251   address start = __ pc();
4252 
4253   // Exception pc is 'return address' for stack walker
4254   __ push(rdx);
4255   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4256 
4257   // Save callee-saved registers.  See x86_64.ad.
4258 
  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.
4262 
4263   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4264 
4265   // Store exception in Thread object. We cannot pass any arguments to the
4266   // handle_exception call, since we do not want to make any assumption
4267   // about the size of the frame where the exception happened in.
4268   // c_rarg0 is either rdi (Linux) or rcx (Windows).
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
4270   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4271 
4272   // This call does all the hard work.  It checks if an exception handler
4273   // exists in the method.
4274   // If so, it returns the handler address.
4275   // If not, it prepares for stack-unwinding, restoring the callee-save
4276   // registers of the frame being removed.
4277   //
4278   // address OptoRuntime::handle_exception_C(JavaThread* thread)
4279 
4280   // At a method handle call, the stack may not be properly aligned
4281   // when returning with an exception.
4282   address the_pc = __ pc();
4283   __ set_last_Java_frame(noreg, noreg, the_pc);
4284   __ mov(c_rarg0, r15_thread);
4285   __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
4286   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4287 
4288   // Set an oopmap for the call site.  This oopmap will only be used if we
4289   // are unwinding the stack.  Hence, all locations will be dead.
4290   // Callee-saved registers will be the same as the frame above (i.e.,
4291   // handle_exception_stub), since they were restored when we got the
4292   // exception.
4293 
4294   OopMapSet* oop_maps = new OopMapSet();
4295 
4296   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4297 
4298   __ reset_last_Java_frame(false);
4299 
4300   // Restore callee-saved registers
4301 
  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog).  Other than that
  // there are no callee-saved registers now that adapter frames are gone.
4305 
4306   __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4307 
4308   __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4309   __ pop(rdx);                  // No need for exception pc anymore
4310 
4311   // rax: exception handler
4312 
4313   // We have a handler in rax (could be deopt blob).
4314   __ mov(r8, rax);
4315 
4316   // Get the exception oop
4317   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4318   // Get the exception pc in case we are deoptimized
4319   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4320 #ifdef ASSERT
4321   __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4322   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4323 #endif
4324   // Clear the exception oop so GC no longer processes it as a root.
4325   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4326 
4327   // rax: exception oop
4328   // r8:  exception handler
4329   // rdx: exception pc
4330   // Jump to handler
4331 
4332   __ jmp(r8);
4333 
4334   // Make sure all code is generated
4335   masm->flush();
4336 
4337   // Set exception blob
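  // framesize is in 4-byte slots; >> 1 converts it to the frame size in
  // 8-byte words expected by the blob.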
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4339 }
4340 #endif // COMPILER2
4341 
4342 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
4343   BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
4344   CodeBuffer buffer(buf);
4345   short buffer_locs[20];
4346   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4347                                          sizeof(buffer_locs)/sizeof(relocInfo));
4348 
4349   MacroAssembler _masm(&buffer);
4350   MacroAssembler* masm = &_masm;
4351 
4352   const Array<SigEntry>* sig_vk = vk->extended_sig();
4353   const Array<VMRegPair>* regs = vk->return_regs();
4354 
4355   int pack_fields_off = __ offset();
4356 
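  // regs->at(0) holds the value object itself (in rax, used below as the
  // base address), so field registers start at j == 1.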
4357   int j = 1;
4358   for (int i = 0; i < sig_vk->length(); i++) {
4359     BasicType bt = sig_vk->at(i)._bt;
4360     if (bt == T_VALUETYPE) {
4361       continue;
4362     }
4363     if (bt == T_VOID) {
4364       if (sig_vk->at(i-1)._bt == T_LONG ||
4365           sig_vk->at(i-1)._bt == T_DOUBLE) {
4366         j++;
4367       }
4368       continue;
4369     }
4370     int off = sig_vk->at(i)._offset;
4371     VMRegPair pair = regs->at(j);
4372     VMReg r_1 = pair.first();
4373     VMReg r_2 = pair.second();
4374     Address to(rax, off);
4375     if (bt == T_FLOAT) {
4376       __ movflt(to, r_1->as_XMMRegister());
4377     } else if (bt == T_DOUBLE) {
4378       __ movdbl(to, r_1->as_XMMRegister());
4379     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4380       __ store_heap_oop(to, r_1->as_Register());
4381     } else {
4382       assert(is_java_primitive(bt), "unexpected basic type");
4383       size_t size_in_bytes = type2aelembytes(bt);
4384       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
4385     }
4386     j++;
4387   }
4388   assert(j == regs->length(), "missed a field?");
4389 
4390   __ ret(0);
4391 
4392   int unpack_fields_off = __ offset();
4393 
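  // As in the pack loop above, regs->at(0) is skipped.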
4394   j = 1;
4395   for (int i = 0; i < sig_vk->length(); i++) {
4396     BasicType bt = sig_vk->at(i)._bt;
4397     if (bt == T_VALUETYPE) {
4398       continue;
4399     }
4400     if (bt == T_VOID) {
4401       if (sig_vk->at(i-1)._bt == T_LONG ||
4402           sig_vk->at(i-1)._bt == T_DOUBLE) {
4403         j++;
4404       }
4405       continue;
4406     }
4407     int off = sig_vk->at(i)._offset;
4408     VMRegPair pair = regs->at(j);
4409     VMReg r_1 = pair.first();
4410     VMReg r_2 = pair.second();
4411     Address from(rax, off);
4412     if (bt == T_FLOAT) {
4413       __ movflt(r_1->as_XMMRegister(), from);
4414     } else if (bt == T_DOUBLE) {
4415       __ movdbl(r_1->as_XMMRegister(), from);
4416     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4417       __ load_heap_oop(r_1->as_Register(), from);
4418     } else {
4419       assert(is_java_primitive(bt), "unexpected basic type");
4420       size_t size_in_bytes = type2aelembytes(bt);
4421       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4422     }
4423     j++;
4424   }
4425   assert(j == regs->length(), "missed a field?");
4426 
4427   if (StressValueTypeReturnedAsFields) {
4428     __ load_klass(rax, rax);
4429     __ orptr(rax, 1);
4430   }
4431 
4432   __ ret(0);
4433 
4434   __ flush();
4435 
4436   return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
4437 }