1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "classfile/symbolTable.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "oops/compiledICHolder.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/vframeArray.hpp"
  41 #include "utilities/align.hpp"
  42 #include "vm_version_x86.hpp"
  43 #include "vmreg_x86.inline.hpp"
  44 #ifdef COMPILER1
  45 #include "c1/c1_Runtime1.hpp"
  46 #endif
  47 #ifdef COMPILER2
  48 #include "opto/runtime.hpp"
  49 #endif
  50 #if INCLUDE_JVMCI
  51 #include "jvmci/jvmciJavaClasses.hpp"
  52 #endif
  53 
  54 #define __ masm->
  55 
  56 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  57 
  58 class SimpleRuntimeFrame {
  59 
  60   public:
  61 
  62   // Most of the runtime stubs have this simple frame layout.
  63   // This class exists to make the layout shared in one place.
  64   // Offsets are for compiler stack slots, which are jints.
  65   enum layout {
  66     // The frame sender code expects that rbp will be in the "natural" place and
  67     // will override any oopMap setting for it. We must therefore force the layout
  68     // so that it agrees with the frame sender code.
  69     rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
  70     rbp_off2,
  71     return_off, return_off2,
  72     framesize
  73   };
  74 };
  75 
  76 class RegisterSaver {
  77   // Capture info about frame layout.  Layout offsets are in jint
  78   // units because compiler frame slots are jints.
  79 #define XSAVE_AREA_BEGIN 160
  80 #define XSAVE_AREA_YMM_BEGIN 576
  81 #define XSAVE_AREA_ZMM_BEGIN 1152
  82 #define XSAVE_AREA_UPPERBANK 1664
  83 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  84 #define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
  85 #define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
  86   enum layout {
  87     fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
  88     xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt,            // offset in fxsave save area
  89     DEF_XMM_OFFS(0),
  90     DEF_XMM_OFFS(1),
  91     // 2..15 are implied in range usage
  92     ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
  93     DEF_YMM_OFFS(0),
  94     DEF_YMM_OFFS(1),
  95     // 2..15 are implied in range usage
  96     zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
  97     zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
  98     DEF_ZMM_OFFS(16),
  99     DEF_ZMM_OFFS(17),
 100     // 18..31 are implied in range usage
 101     fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
 102     fpu_stateH_end,
 103     r15_off, r15H_off,
 104     r14_off, r14H_off,
 105     r13_off, r13H_off,
 106     r12_off, r12H_off,
 107     r11_off, r11H_off,
 108     r10_off, r10H_off,
 109     r9_off,  r9H_off,
 110     r8_off,  r8H_off,
 111     rdi_off, rdiH_off,
 112     rsi_off, rsiH_off,
 113     ignore_off, ignoreH_off,  // extra copy of rbp
 114     rsp_off, rspH_off,
 115     rbx_off, rbxH_off,
 116     rdx_off, rdxH_off,
 117     rcx_off, rcxH_off,
 118     rax_off, raxH_off,
 119     // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
 120     align_off, alignH_off,
 121     flags_off, flagsH_off,
 122     // The frame sender code expects that rbp will be in the "natural" place and
 123     // will override any oopMap setting for it. We must therefore force the layout
 124     // so that it agrees with the frame sender code.
 125     rbp_off, rbpH_off,        // copy of rbp we will restore
 126     return_off, returnH_off,  // slot for return address
 127     reg_save_size             // size in compiler stack slots
 128   };
 129 
 130  public:
 131   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
 132   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
 133 
 134   // Offsets into the register save area
 135   // Used by deoptimization when it is managing result register
 136   // values on its own
 137 
 138   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 139   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 140   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
 141   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 142   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 143 
 144   // During deoptimization only the result registers need to be restored,
 145   // all the other values have already been extracted.
 146   static void restore_result_registers(MacroAssembler* masm);
 147 };
 148 
 149 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
 150   int off = 0;
 151   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 152   if (UseAVX < 3) {
 153     num_xmm_regs = num_xmm_regs/2;
 154   }
 155 #if defined(COMPILER2) || INCLUDE_JVMCI
 156   if (save_vectors) {
 157     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 158     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 159   }
 160 #else
 161   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 162 #endif
 163 
 164   // Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
 165   int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
 166   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 167   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 168   // CodeBlob frame size is in words.
 169   int frame_size_in_words = frame_size_in_bytes / wordSize;
 170   *total_frame_words = frame_size_in_words;
 171 
 172   // Save registers, fpu state, and flags.
 173   // We assume caller has already pushed the return address onto the
 174   // stack, so rsp is 8-byte aligned here.
 175   // We push rpb twice in this sequence because we want the real rbp
 176   // to be under the return like a normal enter.
 177 
 178   __ enter();          // rsp becomes 16-byte aligned here
 179   __ push_CPU_state(); // Push a multiple of 16 bytes
 180 
 181   // push cpu state handles this on EVEX enabled targets
 182   if (save_vectors) {
 183     // Save upper half of YMM registers(0..15)
 184     int base_addr = XSAVE_AREA_YMM_BEGIN;
 185     for (int n = 0; n < 16; n++) {
 186       __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
 187     }
 188     if (VM_Version::supports_evex()) {
 189       // Save upper half of ZMM registers(0..15)
 190       base_addr = XSAVE_AREA_ZMM_BEGIN;
 191       for (int n = 0; n < 16; n++) {
 192         __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
 193       }
 194       // Save full ZMM registers(16..num_xmm_regs)
 195       base_addr = XSAVE_AREA_UPPERBANK;
 196       off = 0;
 197       int vector_len = Assembler::AVX_512bit;
 198       for (int n = 16; n < num_xmm_regs; n++) {
 199         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
 200       }
 201     }
 202   } else {
 203     if (VM_Version::supports_evex()) {
 204       // Save upper bank of ZMM registers(16..31) for double/float usage
 205       int base_addr = XSAVE_AREA_UPPERBANK;
 206       off = 0;
 207       for (int n = 16; n < num_xmm_regs; n++) {
 208         __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
 209       }
 210     }
 211   }
 212   __ vzeroupper();
 213   if (frame::arg_reg_save_area_bytes != 0) {
 214     // Allocate argument register save area
 215     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 216   }
 217 
 218   // Set an oopmap for the call site.  This oopmap will map all
 219   // oop-registers and debug-info registers as callee-saved.  This
 220   // will allow deoptimization at this safepoint to find all possible
 221   // debug-info recordings, as well as let GC find all oops.
 222 
 223   OopMapSet *oop_maps = new OopMapSet();
 224   OopMap* map = new OopMap(frame_size_in_slots, 0);
 225 
 226 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
 227 
 228   map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
 229   map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
 230   map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
 231   map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
 232   // rbp location is known implicitly by the frame sender code, needs no oopmap
 233   // and the location where rbp was saved by is ignored
 234   map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
 235   map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
 236   map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
 238   map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
 239   map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
 240   map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
 241   map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
 242   map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
 243   map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
 244   // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
 245   // on EVEX enabled targets, we get it included in the xsave area
 246   off = xmm0_off;
 247   int delta = xmm1_off - off;
 248   for (int n = 0; n < 16; n++) {
 249     XMMRegister xmm_name = as_XMMRegister(n);
 250     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 251     off += delta;
 252   }
 253   if(UseAVX > 2) {
 254     // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 255     off = zmm16_off;
 256     delta = zmm17_off - off;
 257     for (int n = 16; n < num_xmm_regs; n++) {
 258       XMMRegister zmm_name = as_XMMRegister(n);
 259       map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
 260       off += delta;
 261     }
 262   }
 263 
 264 #if defined(COMPILER2) || INCLUDE_JVMCI
 265   if (save_vectors) {
 266     off = ymm0_off;
 267     int delta = ymm1_off - off;
 268     for (int n = 0; n < 16; n++) {
 269       XMMRegister ymm_name = as_XMMRegister(n);
 270       map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
 271       off += delta;
 272     }
 273   }
 274 #endif // COMPILER2 || INCLUDE_JVMCI
 275 
 276   // %%% These should all be a waste but we'll keep things as they were for now
 277   if (true) {
 278     map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
 279     map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
 280     map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
 281     map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
 282     // rbp location is known implicitly by the frame sender code, needs no oopmap
 283     map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
 284     map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
 285     map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
 286     map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
 287     map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
 288     map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
 289     map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
 290     map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
 291     map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
 292     map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
 293     // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
 294     // on EVEX enabled targets, we get it included in the xsave area
 295     off = xmm0H_off;
 296     delta = xmm1H_off - off;
 297     for (int n = 0; n < 16; n++) {
 298       XMMRegister xmm_name = as_XMMRegister(n);
 299       map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
 300       off += delta;
 301     }
 302     if (UseAVX > 2) {
 303       // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 304       off = zmm16H_off;
 305       delta = zmm17H_off - off;
 306       for (int n = 16; n < num_xmm_regs; n++) {
 307         XMMRegister zmm_name = as_XMMRegister(n);
 308         map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
 309         off += delta;
 310       }
 311     }
 312   }
 313 
 314   return map;
 315 }
 316 
 317 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 318   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 319   if (UseAVX < 3) {
 320     num_xmm_regs = num_xmm_regs/2;
 321   }
 322   if (frame::arg_reg_save_area_bytes != 0) {
 323     // Pop arg register save area
 324     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 325   }
 326 
 327 #if defined(COMPILER2) || INCLUDE_JVMCI
 328   if (restore_vectors) {
 329     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 330     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 331   }
 332 #else
 333   assert(!restore_vectors, "vectors are generated only by C2");
 334 #endif
 335 
 336   __ vzeroupper();
 337 
 338   // On EVEX enabled targets everything is handled in pop fpu state
 339   if (restore_vectors) {
 340     // Restore upper half of YMM registers (0..15)
 341     int base_addr = XSAVE_AREA_YMM_BEGIN;
 342     for (int n = 0; n < 16; n++) {
 343       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
 344     }
 345     if (VM_Version::supports_evex()) {
 346       // Restore upper half of ZMM registers (0..15)
 347       base_addr = XSAVE_AREA_ZMM_BEGIN;
 348       for (int n = 0; n < 16; n++) {
 349         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
 350       }
 351       // Restore full ZMM registers(16..num_xmm_regs)
 352       base_addr = XSAVE_AREA_UPPERBANK;
 353       int vector_len = Assembler::AVX_512bit;
 354       int off = 0;
 355       for (int n = 16; n < num_xmm_regs; n++) {
 356         __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
 357       }
 358     }
 359   } else {
 360     if (VM_Version::supports_evex()) {
 361       // Restore upper bank of ZMM registers(16..31) for double/float usage
 362       int base_addr = XSAVE_AREA_UPPERBANK;
 363       int off = 0;
 364       for (int n = 16; n < num_xmm_regs; n++) {
 365         __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
 366       }
 367     }
 368   }
 369 
 370   // Recover CPU state
 371   __ pop_CPU_state();
 372   // Get the rbp described implicitly by the calling convention (no oopMap)
 373   __ pop(rbp);
 374 }
 375 
 376 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 377 
 378   // Just restore result register. Only used by deoptimization. By
 379   // now any callee save register that needs to be restored to a c2
 380   // caller of the deoptee has been extracted into the vframeArray
 381   // and will be stuffed into the c2i adapter we create for later
 382   // restoration so only result registers need to be restored here.
 383 
 384   // Restore fp result register
 385   __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
 386   // Restore integer result register
 387   __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
 388   __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
 389 
 390   // Pop all of the register save are off the stack except the return address
 391   __ addptr(rsp, return_offset_in_bytes());
 392 }
 393 
 394 // Is vector's size (in bytes) bigger than a size saved by default?
 395 // 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
 396 bool SharedRuntime::is_wide_vector(int size) {
 397   return size > 16;
 398 }
 399 
 400 size_t SharedRuntime::trampoline_size() {
 401   return 16;
 402 }
 403 
 404 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
 405   __ jump(RuntimeAddress(destination));
 406 }
 407 
 408 // The java_calling_convention describes stack locations as ideal slots on
 409 // a frame with no abi restrictions. Since we must observe abi restrictions
 410 // (like the placement of the register window) the slots must be biased by
 411 // the following value.
 412 static int reg2offset_in(VMReg r) {
 413   // Account for saved rbp and return address
 414   // This should really be in_preserve_stack_slots
 415   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
 416 }
 417 
 418 static int reg2offset_out(VMReg r) {
 419   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 420 }
 421 
 422 // ---------------------------------------------------------------------------
 423 // Read the array of BasicTypes from a signature, and compute where the
 424 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 425 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 426 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 427 // as framesizes are fixed.
 428 // VMRegImpl::stack0 refers to the first slot 0(sp).
 429 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.  Register
 430 // up to RegisterImpl::number_of_registers) are the 64-bit
 431 // integer registers.
 432 
 433 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 434 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 435 // units regardless of build. Of course for i486 there is no 64 bit build
 436 
 437 // The Java calling convention is a "shifted" version of the C ABI.
 438 // By skipping the first C ABI register we can call non-static jni methods
 439 // with small numbers of arguments without having to shuffle the arguments
 440 // at all. Since we control the java ABI we ought to at least get some
 441 // advantage out of it.
 442 
 443 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 444                                            VMRegPair *regs,
 445                                            int total_args_passed,
 446                                            int is_outgoing) {
 447 
 448   // Create the mapping between argument positions and
 449   // registers.
 450   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
 451     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
 452   };
 453   static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
 454     j_farg0, j_farg1, j_farg2, j_farg3,
 455     j_farg4, j_farg5, j_farg6, j_farg7
 456   };
 457 
 458 
 459   uint int_args = 0;
 460   uint fp_args = 0;
 461   uint stk_args = 0; // inc by 2 each time
 462 
 463   for (int i = 0; i < total_args_passed; i++) {
 464     switch (sig_bt[i]) {
 465     case T_BOOLEAN:
 466     case T_CHAR:
 467     case T_BYTE:
 468     case T_SHORT:
 469     case T_INT:
 470       if (int_args < Argument::n_int_register_parameters_j) {
 471         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 472       } else {
 473         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 474         stk_args += 2;
 475       }
 476       break;
 477     case T_VOID:
 478       // halves of T_LONG or T_DOUBLE
 479       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 480       regs[i].set_bad();
 481       break;
 482     case T_LONG:
 483       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 484       // fall through
 485     case T_OBJECT:
 486     case T_VALUETYPE:
 487     case T_ARRAY:
 488     case T_ADDRESS:
 489     case T_VALUETYPEPTR:
 490       if (int_args < Argument::n_int_register_parameters_j) {
 491         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 492       } else {
 493         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 494         stk_args += 2;
 495       }
 496       break;
 497     case T_FLOAT:
 498       if (fp_args < Argument::n_float_register_parameters_j) {
 499         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 500       } else {
 501         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 502         stk_args += 2;
 503       }
 504       break;
 505     case T_DOUBLE:
 506       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 507       if (fp_args < Argument::n_float_register_parameters_j) {
 508         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 509       } else {
 510         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 511         stk_args += 2;
 512       }
 513       break;
 514     default:
 515       ShouldNotReachHere();
 516       break;
 517     }
 518   }
 519 
 520   return align_up(stk_args, 2);
 521 }
 522 
 523 // Same as java_calling_convention() but for multiple return
 524 // values. There's no way to store them on the stack so if we don't
 525 // have enough registers, multiple values can't be returned.
 526 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
 527 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 528 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
 529                                           VMRegPair *regs,
 530                                           int total_args_passed) {
 531   // Create the mapping between argument positions and
 532   // registers.
 533   static const Register INT_ArgReg[java_return_convention_max_int] = {
 534     rax, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 535   };
 536   static const XMMRegister FP_ArgReg[java_return_convention_max_float] = {
 537     j_farg0, j_farg1, j_farg2, j_farg3,
 538     j_farg4, j_farg5, j_farg6, j_farg7
 539   };
 540 
 541 
 542   uint int_args = 0;
 543   uint fp_args = 0;
 544 
 545   for (int i = 0; i < total_args_passed; i++) {
 546     switch (sig_bt[i]) {
 547     case T_BOOLEAN:
 548     case T_CHAR:
 549     case T_BYTE:
 550     case T_SHORT:
 551     case T_INT:
 552       if (int_args < Argument::n_int_register_parameters_j+1) {
 553         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 554         int_args++;
 555       } else {
 556         return -1;
 557       }
 558       break;
 559     case T_VOID:
 560       // halves of T_LONG or T_DOUBLE
 561       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 562       regs[i].set_bad();
 563       break;
 564     case T_LONG:
 565       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 566       // fall through
 567     case T_OBJECT:
 568     case T_ARRAY:
 569     case T_ADDRESS:
 570     case T_METADATA:
 571     case T_VALUETYPEPTR:
 572       if (int_args < Argument::n_int_register_parameters_j+1) {
 573         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 574         int_args++;
 575       } else {
 576         return -1;
 577       }
 578       break;
 579     case T_FLOAT:
 580       if (fp_args < Argument::n_float_register_parameters_j) {
 581         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 582         fp_args++;
 583       } else {
 584         return -1;
 585       }
 586       break;
 587     case T_DOUBLE:
 588       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 589       if (fp_args < Argument::n_float_register_parameters_j) {
 590         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 591         fp_args++;
 592       } else {
 593         return -1;
 594       }
 595       break;
 596     default:
 597       ShouldNotReachHere();
 598       break;
 599     }
 600   }
 601 
 602   return int_args + fp_args;
 603 }
 604 
 605 // Patch the callers callsite with entry to compiled code if it exists.
 606 static void patch_callers_callsite(MacroAssembler *masm) {
 607   Label L;
 608   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 609   __ jcc(Assembler::equal, L);
 610 
 611   // Save the current stack pointer
 612   __ mov(r13, rsp);
 613   // Schedule the branch target address early.
 614   // Call into the VM to patch the caller, then jump to compiled callee
 615   // rax isn't live so capture return address while we easily can
 616   __ movptr(rax, Address(rsp, 0));
 617 
 618   // align stack so push_CPU_state doesn't fault
 619   __ andptr(rsp, -(StackAlignmentInBytes));
 620   __ push_CPU_state();
 621   __ vzeroupper();
 622   // VM needs caller's callsite
 623   // VM needs target method
 624   // This needs to be a long call since we will relocate this adapter to
 625   // the codeBuffer and it may not reach
 626 
 627   // Allocate argument register save area
 628   if (frame::arg_reg_save_area_bytes != 0) {
 629     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 630   }
 631   __ mov(c_rarg0, rbx);
 632   __ mov(c_rarg1, rax);
 633   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 634 
 635   // De-allocate argument register save area
 636   if (frame::arg_reg_save_area_bytes != 0) {
 637     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 638   }
 639 
 640   __ vzeroupper();
 641   __ pop_CPU_state();
 642   // restore sp
 643   __ mov(rsp, r13);
 644   __ bind(L);
 645 }
 646 
 647 // For each value type argument, sig includes the list of fields of
 648 // the value type. This utility function computes the number of
 649 // arguments for the call if value types are passed by reference (the
 650 // calling convention the interpreter expects).
 651 static int compute_total_args_passed_int(const GrowableArray<SigEntry>& sig_extended) {
 652   int total_args_passed = 0;
 653   if (ValueTypePassFieldsAsArgs) {
 654     for (int i = 0; i < sig_extended.length(); i++) {
 655       BasicType bt = sig_extended.at(i)._bt;
 656       if (bt == T_VALUETYPE) {
 657         // In sig_extended, a value type argument starts with:
 658         // T_VALUETYPE, followed by the types of the fields of the
 659         // value type and T_VOID to mark the end of the value
 660         // type. Value types are flattened so, for instance, in the
 661         // case of a value type with an int field and a value type
 662         // field that itself has 2 fields, an int and a long:
 663         // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second
 664         // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID
 665         // (outer T_VALUETYPE)
 666         total_args_passed++;
 667         int vt = 1;
 668         do {
 669           i++;
 670           BasicType bt = sig_extended.at(i)._bt;
 671           BasicType prev_bt = sig_extended.at(i-1)._bt;
 672           if (bt == T_VALUETYPE) {
 673             vt++;
 674           } else if (bt == T_VOID &&
 675                      prev_bt != T_LONG &&
 676                      prev_bt != T_DOUBLE) {
 677             vt--;
 678           }
 679         } while (vt != 0);
 680       } else {
 681         total_args_passed++;
 682       }
 683     }
 684   } else {
 685     total_args_passed = sig_extended.length();
 686   }
 687   return total_args_passed;
 688 }
 689 
 690 
 691 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 692                                    BasicType bt,
 693                                    BasicType prev_bt,
 694                                    size_t size_in_bytes,
 695                                    const VMRegPair& reg_pair,
 696                                    const Address& to,
 697                                    int extraspace,
 698                                    bool is_oop) {
 699   assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
 700   if (bt == T_VOID) {
 701     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 702     return;
 703   }
 704 
 705   // Say 4 args:
 706   // i   st_off
 707   // 0   32 T_LONG
 708   // 1   24 T_VOID
 709   // 2   16 T_OBJECT
 710   // 3    8 T_BOOL
 711   // -    0 return address
 712   //
 713   // However to make thing extra confusing. Because we can fit a long/double in
 714   // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter
 715   // leaves one slot empty and only stores to a single slot. In this case the
 716   // slot that is occupied is the T_VOID slot. See I said it was confusing.
 717 
 718   bool wide = (size_in_bytes == wordSize);
 719   VMReg r_1 = reg_pair.first();
 720   VMReg r_2 = reg_pair.second();
 721   assert(r_2->is_valid() == wide, "invalid size");
 722   if (!r_1->is_valid()) {
 723     assert(!r_2->is_valid(), "must be invalid");
 724     return;
 725   }
 726 
 727   if (!r_1->is_XMMRegister()) {
 728     Register val = rax;
 729     assert_different_registers(to.base(), val);
 730     if(r_1->is_stack()) {
 731       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 732       __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 733     } else {
 734       val = r_1->as_Register();
 735     }
 736     if (is_oop) {
 737       __ store_heap_oop(to, val);
 738     } else {
 739       __ store_sized_value(to, val, size_in_bytes);
 740     }
 741   } else {
 742     if (wide) {
 743       __ movdbl(to, r_1->as_XMMRegister());
 744     } else {
 745       __ movflt(to, r_1->as_XMMRegister());
 746     }
 747   }
 748 }
 749 
 750 static void gen_c2i_adapter(MacroAssembler *masm,
 751                             const GrowableArray<SigEntry>& sig_extended,
 752                             const VMRegPair *regs,
 753                             Label& skip_fixup,
 754                             address start,
 755                             OopMapSet*& oop_maps,
 756                             int& frame_complete,
 757                             int& frame_size_in_words) {
 758   // Before we get into the guts of the C2I adapter, see if we should be here
 759   // at all.  We've come from compiled code and are attempting to jump to the
 760   // interpreter, which means the caller made a static call to get here
 761   // (vcalls always get a compiled target if there is one).  Check for a
 762   // compiled target.  If there is one, we need to patch the caller's call.
 763   patch_callers_callsite(masm);
 764 
 765   __ bind(skip_fixup);
 766 
 767   bool has_value_argument = false;
 768   if (ValueTypePassFieldsAsArgs) {
 769     // Is there a value type argument?
 770     for (int i = 0; i < sig_extended.length() && !has_value_argument; i++) {
 771       has_value_argument = (sig_extended.at(i)._bt == T_VALUETYPE);
 772     }
 773     if (has_value_argument) {
 774       // There is at least a value type argument: we're coming from
 775       // compiled code so we have no buffers to back the value
 776       // types. Allocate the buffers here with a runtime call.
 777       oop_maps = new OopMapSet();
 778       OopMap* map = NULL;
 779 
 780       map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
 781 
 782       frame_complete = __ offset();
 783 
 784       __ set_last_Java_frame(noreg, noreg, NULL);
 785 
 786       __ mov(c_rarg0, r15_thread);
 787       __ mov(c_rarg1, rbx);
 788 
 789       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types)));
 790 
 791       oop_maps->add_gc_map((int)(__ pc() - start), map);
 792       __ reset_last_Java_frame(false);
 793 
 794       RegisterSaver::restore_live_registers(masm);
 795 
 796       Label no_exception;
 797       __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
 798       __ jcc(Assembler::equal, no_exception);
 799 
 800       __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
 801       __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
 802       __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 803 
 804       __ bind(no_exception);
 805 
 806       // We get an array of objects from the runtime call
 807       __ get_vm_result(r13, r15_thread); // Use r13 as temporary because r10 is trashed by movptr()
 808       __ get_vm_result_2(rbx, r15_thread); // TODO: required to keep the callee Method live?
 809       __ mov(r10, r13);
 810     }
 811   }
 812 
 813   // Since all args are passed on the stack, total_args_passed *
 814   // Interpreter::stackElementSize is the space we need. Plus 1 because
 815   // we also account for the return address location since
 816   // we store it first rather than hold it in rax across all the shuffling
 817   int total_args_passed = compute_total_args_passed_int(sig_extended);
 818   int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
 819 
 820   // stack is aligned, keep it that way
 821   extraspace = align_up(extraspace, 2*wordSize);
 822 
 823   // Get return address
 824   __ pop(rax);
 825 
 826   // set senderSP value
 827   __ mov(r13, rsp);
 828 
 829   __ subptr(rsp, extraspace);
 830 
 831   // Store the return address in the expected location
 832   __ movptr(Address(rsp, 0), rax);
 833 
 834   // Now write the args into the outgoing interpreter space
 835 
 836   // next_arg_comp is the next argument from the compiler point of
 837   // view (value type fields are passed in registers/on the stack). In
 838   // sig_extended, a value type argument starts with: T_VALUETYPE,
 839   // followed by the types of the fields of the value type and T_VOID
 840   // to mark the end of the value type. ignored counts the number of
 841   // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument:
 842   // used to get the buffer for that argument from the pool of buffers
 843   // we allocated above and want to pass to the
 844   // interpreter. next_arg_int is the next argument from the
 845   // interpreter point of view (value types are passed by reference).
 846   bool has_oop_field = false;
 847   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 848        next_arg_comp < sig_extended.length(); next_arg_comp++) {
 849     assert(ignored <= next_arg_comp, "shouldn't skip over more slot than there are arguments");
 850     assert(next_arg_int < total_args_passed, "more arguments for the interpreter than expected?");
 851     BasicType bt = sig_extended.at(next_arg_comp)._bt;
 852     int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
 853     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
 854       int next_off = st_off - Interpreter::stackElementSize;
 855       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 856       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 857       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 858       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
 859                              size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
 860       next_arg_int++;
 861 #ifdef ASSERT
 862       if (bt == T_LONG || bt == T_DOUBLE) {
 863         // Overwrite the unused slot with known junk
 864         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 865         __ movptr(Address(rsp, st_off), rax);
 866       }
 867 #endif /* ASSERT */
 868     } else {
 869       ignored++;
 870       // get the buffer from the just allocated pool of buffers
 871       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE);
 872       __ load_heap_oop(r11, Address(r10, index));
 873       next_vt_arg++; next_arg_int++;
 874       int vt = 1;
 875       // write fields we get from compiled code in registers/stack
 876       // slots to the buffer: we know we are done with that value type
 877       // argument when we hit the T_VOID that acts as an end of value
 878       // type delimiter for this value type. Value types are flattened
 879       // so we might encounter embedded value types. Each entry in
 880       // sig_extended contains a field offset in the buffer.
 881       do {
 882         next_arg_comp++;
 883         BasicType bt = sig_extended.at(next_arg_comp)._bt;
 884         BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
 885         if (bt == T_VALUETYPE) {
 886           vt++;
 887           ignored++;
 888         } else if (bt == T_VOID &&
 889                    prev_bt != T_LONG &&
 890                    prev_bt != T_DOUBLE) {
 891           vt--;
 892           ignored++;
 893         } else {
 894           int off = sig_extended.at(next_arg_comp)._offset;
 895           assert(off > 0, "offset in object should be positive");
 896           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 897           bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
 898           has_oop_field = has_oop_field || is_oop;
 899           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
 900                                  size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop);
 901         }
 902       } while (vt != 0);
 903       // pass the buffer to the interpreter
 904       __ movptr(Address(rsp, st_off), r11);
 905     }
 906   }
 907 
 908   // If a value type was allocated and initialized, apply post barrier to all oop fields
 909   if (has_value_argument && has_oop_field) {
 910     __ push(r13); // save senderSP
 911     __ push(rbx); // save callee
 912     // Allocate argument register save area
 913     if (frame::arg_reg_save_area_bytes != 0) {
 914       __ subptr(rsp, frame::arg_reg_save_area_bytes);
 915     }
 916     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10);
 917     // De-allocate argument register save area
 918     if (frame::arg_reg_save_area_bytes != 0) {
 919       __ addptr(rsp, frame::arg_reg_save_area_bytes);
 920     }
 921     __ pop(rbx); // restore callee
 922     __ pop(r13); // restore sender SP
 923   }
 924 
 925   // Schedule the branch target address early.
 926   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 927   __ jmp(rcx);
 928 }
 929 
 930 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 931                         address code_start, address code_end,
 932                         Label& L_ok) {
 933   Label L_fail;
 934   __ lea(temp_reg, ExternalAddress(code_start));
 935   __ cmpptr(pc_reg, temp_reg);
 936   __ jcc(Assembler::belowEqual, L_fail);
 937   __ lea(temp_reg, ExternalAddress(code_end));
 938   __ cmpptr(pc_reg, temp_reg);
 939   __ jcc(Assembler::below, L_ok);
 940   __ bind(L_fail);
 941 }
 942 
 943 static void gen_i2c_adapter_helper(MacroAssembler* masm,
 944                                    BasicType bt,
 945                                    BasicType prev_bt,
 946                                    size_t size_in_bytes,
 947                                    const VMRegPair& reg_pair,
 948                                    const Address& from,
 949                                    bool is_oop) {
 950   assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
 951   if (bt == T_VOID) {
 952     // Longs and doubles are passed in native word order, but misaligned
 953     // in the 32-bit build.
 954     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 955     return;
 956   }
 957   assert(!reg_pair.second()->is_valid() || reg_pair.first()->next() == reg_pair.second(),
 958          "scrambled load targets?");
 959 
 960   bool wide = (size_in_bytes == wordSize);
 961   VMReg r_1 = reg_pair.first();
 962   VMReg r_2 = reg_pair.second();
 963   assert(r_2->is_valid() == wide, "invalid size");
 964   if (!r_1->is_valid()) {
 965     assert(!r_2->is_valid(), "must be invalid");
 966     return;
 967   }
 968 
 969   bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
 970   if (!r_1->is_XMMRegister()) {
 971     // We can use r13 as a temp here because compiled code doesn't need r13 as an input
 972     // and if we end up going thru a c2i because of a miss a reasonable value of r13
 973     // will be generated.
 974     Register dst = r_1->is_stack() ? r13 : r_1->as_Register();
 975     if (is_oop) {
 976       __ load_heap_oop(dst, from);
 977     } else {
 978       __ load_sized_value(dst, from, size_in_bytes, is_signed);
 979     }
 980     if (r_1->is_stack()) {
 981       // Convert stack slot to an SP offset (+ wordSize to account for return address)
 982       int st_off = reg_pair.first()->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 983       __ movq(Address(rsp, st_off), dst);
 984     }
 985   } else {
 986     if (wide) {
 987       __ movdbl(r_1->as_XMMRegister(), from);
 988     } else {
 989       __ movflt(r_1->as_XMMRegister(), from);
 990     }
 991   }
 992 }
 993 
 994 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 995                                     int comp_args_on_stack,
 996                                     const GrowableArray<SigEntry>& sig_extended,
 997                                     const VMRegPair *regs) {
 998 
 999   // Note: r13 contains the senderSP on entry. We must preserve it since
1000   // we may do a i2c -> c2i transition if we lose a race where compiled
1001   // code goes non-entrant while we get args ready.
1002   // In addition we use r13 to locate all the interpreter args as
1003   // we must align the stack to 16 bytes on an i2c entry else we
1004   // lose alignment we expect in all compiled code and register
1005   // save code can segv when fxsave instructions find improperly
1006   // aligned stack pointer.
1007 
1008   // Adapters can be frameless because they do not require the caller
1009   // to perform additional cleanup work, such as correcting the stack pointer.
1010   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1011   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1012   // even if a callee has modified the stack pointer.
1013   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1014   // routinely repairs its caller's stack pointer (from sender_sp, which is set
1015   // up via the senderSP register).
1016   // In other words, if *either* the caller or callee is interpreted, we can
1017   // get the stack pointer repaired after a call.
1018   // This is why c2i and i2c adapters cannot be indefinitely composed.
1019   // In particular, if a c2i adapter were to somehow call an i2c adapter,
1020   // both caller and callee would be compiled methods, and neither would
1021   // clean up the stack pointer changes performed by the two adapters.
1022   // If this happens, control eventually transfers back to the compiled
1023   // caller, but with an uncorrected stack, causing delayed havoc.
1024 
1025   // Pick up the return address
1026   __ movptr(rax, Address(rsp, 0));
1027 
1028   if (VerifyAdapterCalls &&
1029       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
1030     // So, let's test for cascading c2i/i2c adapters right now.
1031     //  assert(Interpreter::contains($return_addr) ||
1032     //         StubRoutines::contains($return_addr),
1033     //         "i2c adapter must return to an interpreter frame");
1034     __ block_comment("verify_i2c { ");
1035     Label L_ok;
1036     if (Interpreter::code() != NULL)
1037       range_check(masm, rax, r11,
1038                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
1039                   L_ok);
1040     if (StubRoutines::code1() != NULL)
1041       range_check(masm, rax, r11,
1042                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
1043                   L_ok);
1044     if (StubRoutines::code2() != NULL)
1045       range_check(masm, rax, r11,
1046                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
1047                   L_ok);
1048     const char* msg = "i2c adapter must return to an interpreter frame";
1049     __ block_comment(msg);
1050     __ stop(msg);
1051     __ bind(L_ok);
1052     __ block_comment("} verify_i2ce ");
1053   }
1054 
1055   // Must preserve original SP for loading incoming arguments because
1056   // we need to align the outgoing SP for compiled code.
1057   __ movptr(r11, rsp);
1058 
1059   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
1060   // in registers, we will occasionally have no stack args.
1061   int comp_words_on_stack = 0;
1062   if (comp_args_on_stack) {
1063     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
1064     // registers are below.  By subtracting stack0, we either get a negative
1065     // number (all values in registers) or the maximum stack slot accessed.
1066 
1067     // Convert 4-byte c2 stack slots to words.
1068     comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
1069     // Round up to miminum stack alignment, in wordSize
1070     comp_words_on_stack = align_up(comp_words_on_stack, 2);
1071     __ subptr(rsp, comp_words_on_stack * wordSize);
1072   }
1073 
1074 
1075   // Ensure compiled code always sees stack at proper alignment
1076   __ andptr(rsp, -16);
1077 
1078   // push the return address and misalign the stack that youngest frame always sees
1079   // as far as the placement of the call instruction
1080   __ push(rax);
1081 
1082   // Put saved SP in another register
1083   const Register saved_sp = rax;
1084   __ movptr(saved_sp, r11);
1085 
1086   // Will jump to the compiled code just as if compiled code was doing it.
1087   // Pre-load the register-jump target early, to schedule it better.
1088   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
1089 
1090 #if INCLUDE_JVMCI
1091   if (EnableJVMCI || UseAOT) {
1092     // check if this call should be routed towards a specific entry point
1093     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1094     Label no_alternative_target;
1095     __ jcc(Assembler::equal, no_alternative_target);
1096     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1097     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1098     __ bind(no_alternative_target);
1099   }
1100 #endif // INCLUDE_JVMCI
1101 
1102   int total_args_passed = compute_total_args_passed_int(sig_extended);
1103   // Now generate the shuffle code.  Pick up all register args and move the
1104   // rest through the floating point stack top.
1105 
1106   // next_arg_comp is the next argument from the compiler point of
1107   // view (value type fields are passed in registers/on the stack). In
1108   // sig_extended, a value type argument starts with: T_VALUETYPE,
1109   // followed by the types of the fields of the value type and T_VOID
1110   // to mark the end of the value type. ignored counts the number of
1111   // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the
1112   // interpreter point of view (value types are passed by reference).
1113   for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig_extended.length(); next_arg_comp++) {
1114     assert(ignored <= next_arg_comp, "shouldn't skip over more slot than there are arguments");
1115     assert(next_arg_int < total_args_passed, "more arguments from the interpreter than expected?");
1116     BasicType bt = sig_extended.at(next_arg_comp)._bt;
1117     int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize;
1118     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
1119       // Load in argument order going down.
1120       // Point to interpreter value (vs. tag)
1121       int next_off = ld_off - Interpreter::stackElementSize;
1122       int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
1123       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1124       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1125       gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
1126                              size_in_bytes, reg_pair, Address(saved_sp, offset), false);
1127       next_arg_int++;
1128     } else {
1129       next_arg_int++;
1130       ignored++;
1131       // get the buffer for that value type
1132       __ movptr(r10, Address(saved_sp, ld_off));
1133       int vt = 1;
1134       // load fields to registers/stack slots from the buffer: we know
1135       // we are done with that value type argument when we hit the
1136       // T_VOID that acts as an end of value type delimiter for this
1137       // value type. Value types are flattened so we might encounter
1138       // embedded value types. Each entry in sig_extended contains a
1139       // field offset in the buffer.
1140       do {
1141         next_arg_comp++;
1142         BasicType bt = sig_extended.at(next_arg_comp)._bt;
1143         BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
1144         if (bt == T_VALUETYPE) {
1145           vt++;
1146           ignored++;
1147         } else if (bt == T_VOID &&
1148                    prev_bt != T_LONG &&
1149                    prev_bt != T_DOUBLE) {
1150           vt--;
1151           ignored++;
1152         } else {
1153           int off = sig_extended.at(next_arg_comp)._offset;
1154           assert(off > 0, "offset in object should be positive");
1155           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1156           bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
1157           gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop);
1158         }
1159       } while (vt != 0);
1160     }
1161   }
1162 
1163   // 6243940 We might end up in handle_wrong_method if
1164   // the callee is deoptimized as we race thru here. If that
1165   // happens we don't want to take a safepoint because the
1166   // caller frame will look interpreted and arguments are now
1167   // "compiled" so it is much better to make this transition
1168   // invisible to the stack walking code. Unfortunately if
1169   // we try and find the callee by normal means a safepoint
1170   // is possible. So we stash the desired callee in the thread
1171   // and the vm will find there should this case occur.
1172 
1173   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1174 
1175   // put Method* where a c2i would expect should we end up there
1176   // only needed because of c2 resolve stubs return Method* as a result in
1177   // rax
1178   __ mov(rax, rbx);
1179   __ jmp(r11);
1180 }
1181 
1182 // ---------------------------------------------------------------
1183 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1184                                                             int comp_args_on_stack,
1185                                                             const GrowableArray<SigEntry>& sig_extended,
1186                                                             const VMRegPair *regs,
1187                                                             AdapterFingerPrint* fingerprint,
1188                                                             AdapterBlob*& new_adapter) {
1189   address i2c_entry = __ pc();
1190 
1191   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
1192 
1193   // -------------------------------------------------------------------------
1194   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1195   // to the interpreter.  The args start out packed in the compiled layout.  They
1196   // need to be unpacked into the interpreter layout.  This will almost always
1197   // require some stack space.  We grow the current (compiled) stack, then repack
1198   // the args.  We  finally end in a jump to the generic interpreter entry point.
1199   // On exit from the interpreter, the interpreter will restore our SP (lest the
1200   // compiled code, which relys solely on SP and not RBP, get sick).
1201 
1202   address c2i_unverified_entry = __ pc();
1203   Label skip_fixup;
1204   Label ok;
1205 
1206   Register holder = rax;
1207   Register receiver = j_rarg0;
1208   Register temp = rbx;
1209 
1210   {
1211     __ load_klass(temp, receiver);
1212     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1213     __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
1214     __ jcc(Assembler::equal, ok);
1215     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1216 
1217     __ bind(ok);
1218     // Method might have been compiled since the call site was patched to
1219     // interpreted if that is the case treat it as a miss so we can get
1220     // the call site corrected.
1221     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
1222     __ jcc(Assembler::equal, skip_fixup);
1223     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1224   }
1225 
1226   address c2i_entry = __ pc();
1227 
1228   OopMapSet* oop_maps = NULL;
1229   int frame_complete = CodeOffsets::frame_never_safe;
1230   int frame_size_in_words = 0;
1231   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
1232 
1233   __ flush();
1234   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
1235 
1236   // If value types are passed as fields, save the extended signature as symbol in
1237   // the AdapterHandlerEntry to be used by nmethod::preserve_callee_argument_oops().
1238   Symbol* extended_signature = NULL;
1239   if (ValueTypePassFieldsAsArgs) {
1240     bool has_value_argument = false;
1241     Thread* THREAD = Thread::current();
1242     ResourceMark rm(THREAD);
1243     int length = sig_extended.length();
1244     char* sig_str = NEW_RESOURCE_ARRAY(char, 2*length + 3);
1245     int idx = 0;
1246     sig_str[idx++] = '(';
1247     for (int index = 0; index < length; index++) {
1248       BasicType bt = sig_extended.at(index)._bt;
1249       if (bt == T_VALUETYPE) {
1250         has_value_argument = true;
1251       } else if (bt == T_VALUETYPEPTR) {
1252         // non-flattened value type field
1253         sig_str[idx++] = type2char(T_VALUETYPE);
1254         sig_str[idx++] = ';';
1255       } else if (bt == T_VOID) {
1256         // Ignore
1257       } else {
1258         if (bt == T_ARRAY) {
1259           bt = T_OBJECT; // We don't know the element type, treat as Object
1260         }
1261         sig_str[idx++] = type2char(bt);
1262         if (bt == T_OBJECT) {
1263           sig_str[idx++] = ';';
1264         }
1265       }
1266     }
1267     sig_str[idx++] = ')';
1268     sig_str[idx++] = '\0';
1269     if (has_value_argument) {
1270       // Extended signature is only required if a value type argument is passed
1271       extended_signature = SymbolTable::new_permanent_symbol(sig_str, THREAD);
1272     }
1273   }
1274 
1275   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, extended_signature);
1276 }
1277 
1278 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1279                                          VMRegPair *regs,
1280                                          VMRegPair *regs2,
1281                                          int total_args_passed) {
1282   assert(regs2 == NULL, "not needed on x86");
1283 // We return the amount of VMRegImpl stack slots we need to reserve for all
1284 // the arguments NOT counting out_preserve_stack_slots.
1285 
1286 // NOTE: These arrays will have to change when c1 is ported
1287 #ifdef _WIN64
1288     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1289       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1290     };
1291     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1292       c_farg0, c_farg1, c_farg2, c_farg3
1293     };
1294 #else
1295     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1296       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1297     };
1298     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1299       c_farg0, c_farg1, c_farg2, c_farg3,
1300       c_farg4, c_farg5, c_farg6, c_farg7
1301     };
1302 #endif // _WIN64
1303 
1304 
1305     uint int_args = 0;
1306     uint fp_args = 0;
1307     uint stk_args = 0; // inc by 2 each time
1308 
1309     for (int i = 0; i < total_args_passed; i++) {
1310       switch (sig_bt[i]) {
1311       case T_BOOLEAN:
1312       case T_CHAR:
1313       case T_BYTE:
1314       case T_SHORT:
1315       case T_INT:
1316         if (int_args < Argument::n_int_register_parameters_c) {
1317           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1318 #ifdef _WIN64
1319           fp_args++;
1320           // Allocate slots for callee to stuff register args the stack.
1321           stk_args += 2;
1322 #endif
1323         } else {
1324           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1325           stk_args += 2;
1326         }
1327         break;
1328       case T_LONG:
1329         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1330         // fall through
1331       case T_OBJECT:
1332       case T_ARRAY:
1333       case T_ADDRESS:
1334       case T_METADATA:
1335         if (int_args < Argument::n_int_register_parameters_c) {
1336           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1337 #ifdef _WIN64
1338           fp_args++;
1339           stk_args += 2;
1340 #endif
1341         } else {
1342           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1343           stk_args += 2;
1344         }
1345         break;
1346       case T_FLOAT:
1347         if (fp_args < Argument::n_float_register_parameters_c) {
1348           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1349 #ifdef _WIN64
1350           int_args++;
1351           // Allocate slots for callee to stuff register args the stack.
1352           stk_args += 2;
1353 #endif
1354         } else {
1355           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1356           stk_args += 2;
1357         }
1358         break;
1359       case T_DOUBLE:
1360         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1361         if (fp_args < Argument::n_float_register_parameters_c) {
1362           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1363 #ifdef _WIN64
1364           int_args++;
1365           // Allocate slots for callee to stuff register args the stack.
1366           stk_args += 2;
1367 #endif
1368         } else {
1369           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1370           stk_args += 2;
1371         }
1372         break;
1373       case T_VOID: // Halves of longs and doubles
1374         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1375         regs[i].set_bad();
1376         break;
1377       default:
1378         ShouldNotReachHere();
1379         break;
1380       }
1381     }
1382 #ifdef _WIN64
1383   // windows abi requires that we always allocate enough stack space
1384   // for 4 64bit registers to be stored down.
1385   if (stk_args < 8) {
1386     stk_args = 8;
1387   }
1388 #endif // _WIN64
1389 
1390   return stk_args;
1391 }
1392 
1393 // On 64 bit we will store integer like items to the stack as
1394 // 64 bits items (sparc abi) even though java would only store
1395 // 32bits for a parameter. On 32bit it will simply be 32 bits
1396 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
1397 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1398   if (src.first()->is_stack()) {
1399     if (dst.first()->is_stack()) {
1400       // stack to stack
1401       __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
1402       __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1403     } else {
1404       // stack to reg
1405       __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1406     }
1407   } else if (dst.first()->is_stack()) {
1408     // reg to stack
1409     // Do we really have to sign extend???
1410     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
1411     __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1412   } else {
1413     // Do we really have to sign extend???
1414     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
1415     if (dst.first() != src.first()) {
1416       __ movq(dst.first()->as_Register(), src.first()->as_Register());
1417     }
1418   }
1419 }
1420 
1421 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1422   if (src.first()->is_stack()) {
1423     if (dst.first()->is_stack()) {
1424       // stack to stack
1425       __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1426       __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1427     } else {
1428       // stack to reg
1429       __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1430     }
1431   } else if (dst.first()->is_stack()) {
1432     // reg to stack
1433     __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1434   } else {
1435     if (dst.first() != src.first()) {
1436       __ movq(dst.first()->as_Register(), src.first()->as_Register());
1437     }
1438   }
1439 }
1440 
1441 // An oop arg. Must pass a handle not the oop itself
1442 static void object_move(MacroAssembler* masm,
1443                         OopMap* map,
1444                         int oop_handle_offset,
1445                         int framesize_in_slots,
1446                         VMRegPair src,
1447                         VMRegPair dst,
1448                         bool is_receiver,
1449                         int* receiver_offset) {
1450 
1451   // must pass a handle. First figure out the location we use as a handle
1452 
1453   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
1454 
1455   // See if oop is NULL if it is we need no handle
1456 
1457   if (src.first()->is_stack()) {
1458 
1459     // Oop is already on the stack as an argument
1460     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1461     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1462     if (is_receiver) {
1463       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1464     }
1465 
1466     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1467     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1468     // conditionally move a NULL
1469     __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
1470   } else {
1471 
1472     // Oop is in an a register we must store it to the space we reserve
1473     // on the stack for oop_handles and pass a handle if oop is non-NULL
1474 
1475     const Register rOop = src.first()->as_Register();
1476     int oop_slot;
1477     if (rOop == j_rarg0)
1478       oop_slot = 0;
1479     else if (rOop == j_rarg1)
1480       oop_slot = 1;
1481     else if (rOop == j_rarg2)
1482       oop_slot = 2;
1483     else if (rOop == j_rarg3)
1484       oop_slot = 3;
1485     else if (rOop == j_rarg4)
1486       oop_slot = 4;
1487     else {
1488       assert(rOop == j_rarg5, "wrong register");
1489       oop_slot = 5;
1490     }
1491 
1492     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
1493     int offset = oop_slot*VMRegImpl::stack_slot_size;
1494 
1495     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1496     // Store oop in handle area, may be NULL
1497     __ movptr(Address(rsp, offset), rOop);
1498     if (is_receiver) {
1499       *receiver_offset = offset;
1500     }
1501 
1502     __ cmpptr(rOop, (int32_t)NULL_WORD);
1503     __ lea(rHandle, Address(rsp, offset));
1504     // conditionally move a NULL from the handle area where it was just stored
1505     __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
1506   }
1507 
1508   // If arg is on the stack then place it otherwise it is already in correct reg.
1509   if (dst.first()->is_stack()) {
1510     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1511   }
1512 }
1513 
1514 // A float arg may have to do float reg int reg conversion
1515 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1516   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1517 
1518   // The calling conventions assures us that each VMregpair is either
1519   // all really one physical register or adjacent stack slots.
1520   // This greatly simplifies the cases here compared to sparc.
1521 
1522   if (src.first()->is_stack()) {
1523     if (dst.first()->is_stack()) {
1524       __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1525       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1526     } else {
1527       // stack to reg
1528       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1529       __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1530     }
1531   } else if (dst.first()->is_stack()) {
1532     // reg to stack
1533     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1534     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1535   } else {
1536     // reg to reg
1537     // In theory these overlap but the ordering is such that this is likely a nop
1538     if ( src.first() != dst.first()) {
1539       __ movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
1540     }
1541   }
1542 }
1543 
1544 // A long move
1545 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1546 
1547   // The calling conventions assures us that each VMregpair is either
1548   // all really one physical register or adjacent stack slots.
1549   // This greatly simplifies the cases here compared to sparc.
1550 
1551   if (src.is_single_phys_reg() ) {
1552     if (dst.is_single_phys_reg()) {
1553       if (dst.first() != src.first()) {
1554         __ mov(dst.first()->as_Register(), src.first()->as_Register());
1555       }
1556     } else {
1557       assert(dst.is_single_reg(), "not a stack pair");
1558       __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1559     }
1560   } else if (dst.is_single_phys_reg()) {
1561     assert(src.is_single_reg(),  "not a stack pair");
1562     __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
1563   } else {
1564     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1565     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1566     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1567   }
1568 }
1569 
1570 // A double move
1571 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1572 
1573   // The calling conventions assures us that each VMregpair is either
1574   // all really one physical register or adjacent stack slots.
1575   // This greatly simplifies the cases here compared to sparc.
1576 
1577   if (src.is_single_phys_reg() ) {
1578     if (dst.is_single_phys_reg()) {
1579       // In theory these overlap but the ordering is such that this is likely a nop
1580       if ( src.first() != dst.first()) {
1581         __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1582       }
1583     } else {
1584       assert(dst.is_single_reg(), "not a stack pair");
1585       __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1586     }
1587   } else if (dst.is_single_phys_reg()) {
1588     assert(src.is_single_reg(),  "not a stack pair");
1589     __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
1590   } else {
1591     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1592     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1593     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1594   }
1595 }
1596 
1597 
1598 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1599   // We always ignore the frame_slots arg and just use the space just below frame pointer
1600   // which by this time is free to use
1601   switch (ret_type) {
1602   case T_FLOAT:
1603     __ movflt(Address(rbp, -wordSize), xmm0);
1604     break;
1605   case T_DOUBLE:
1606     __ movdbl(Address(rbp, -wordSize), xmm0);
1607     break;
1608   case T_VOID:  break;
1609   default: {
1610     __ movptr(Address(rbp, -wordSize), rax);
1611     }
1612   }
1613 }
1614 
1615 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1616   // We always ignore the frame_slots arg and just use the space just below frame pointer
1617   // which by this time is free to use
1618   switch (ret_type) {
1619   case T_FLOAT:
1620     __ movflt(xmm0, Address(rbp, -wordSize));
1621     break;
1622   case T_DOUBLE:
1623     __ movdbl(xmm0, Address(rbp, -wordSize));
1624     break;
1625   case T_VOID:  break;
1626   default: {
1627     __ movptr(rax, Address(rbp, -wordSize));
1628     }
1629   }
1630 }
1631 
1632 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1633     for ( int i = first_arg ; i < arg_count ; i++ ) {
1634       if (args[i].first()->is_Register()) {
1635         __ push(args[i].first()->as_Register());
1636       } else if (args[i].first()->is_XMMRegister()) {
1637         __ subptr(rsp, 2*wordSize);
1638         __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1639       }
1640     }
1641 }
1642 
1643 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1644     for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1645       if (args[i].first()->is_Register()) {
1646         __ pop(args[i].first()->as_Register());
1647       } else if (args[i].first()->is_XMMRegister()) {
1648         __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1649         __ addptr(rsp, 2*wordSize);
1650       }
1651     }
1652 }
1653 
1654 
1655 static void save_or_restore_arguments(MacroAssembler* masm,
1656                                       const int stack_slots,
1657                                       const int total_in_args,
1658                                       const int arg_save_area,
1659                                       OopMap* map,
1660                                       VMRegPair* in_regs,
1661                                       BasicType* in_sig_bt) {
1662   // if map is non-NULL then the code should store the values,
1663   // otherwise it should load them.
1664   int slot = arg_save_area;
1665   // Save down double word first
1666   for ( int i = 0; i < total_in_args; i++) {
1667     if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1668       int offset = slot * VMRegImpl::stack_slot_size;
1669       slot += VMRegImpl::slots_per_word;
1670       assert(slot <= stack_slots, "overflow");
1671       if (map != NULL) {
1672         __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1673       } else {
1674         __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1675       }
1676     }
1677     if (in_regs[i].first()->is_Register() &&
1678         (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1679       int offset = slot * VMRegImpl::stack_slot_size;
1680       if (map != NULL) {
1681         __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1682         if (in_sig_bt[i] == T_ARRAY) {
1683           map->set_oop(VMRegImpl::stack2reg(slot));;
1684         }
1685       } else {
1686         __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1687       }
1688       slot += VMRegImpl::slots_per_word;
1689     }
1690   }
1691   // Save or restore single word registers
1692   for ( int i = 0; i < total_in_args; i++) {
1693     if (in_regs[i].first()->is_Register()) {
1694       int offset = slot * VMRegImpl::stack_slot_size;
1695       slot++;
1696       assert(slot <= stack_slots, "overflow");
1697 
1698       // Value is in an input register pass we must flush it to the stack
1699       const Register reg = in_regs[i].first()->as_Register();
1700       switch (in_sig_bt[i]) {
1701         case T_BOOLEAN:
1702         case T_CHAR:
1703         case T_BYTE:
1704         case T_SHORT:
1705         case T_INT:
1706           if (map != NULL) {
1707             __ movl(Address(rsp, offset), reg);
1708           } else {
1709             __ movl(reg, Address(rsp, offset));
1710           }
1711           break;
1712         case T_ARRAY:
1713         case T_LONG:
1714           // handled above
1715           break;
1716         case T_OBJECT:
1717         default: ShouldNotReachHere();
1718       }
1719     } else if (in_regs[i].first()->is_XMMRegister()) {
1720       if (in_sig_bt[i] == T_FLOAT) {
1721         int offset = slot * VMRegImpl::stack_slot_size;
1722         slot++;
1723         assert(slot <= stack_slots, "overflow");
1724         if (map != NULL) {
1725           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1726         } else {
1727           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1728         }
1729       }
1730     } else if (in_regs[i].first()->is_stack()) {
1731       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1732         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1733         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1734       }
1735     }
1736   }
1737 }
1738 
1739 
1740 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1741 // keeps a new JNI critical region from starting until a GC has been
1742 // forced.  Save down any oops in registers and describe them in an
1743 // OopMap.
1744 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1745                                                int stack_slots,
1746                                                int total_c_args,
1747                                                int total_in_args,
1748                                                int arg_save_area,
1749                                                OopMapSet* oop_maps,
1750                                                VMRegPair* in_regs,
1751                                                BasicType* in_sig_bt) {
1752   __ block_comment("check GCLocker::needs_gc");
1753   Label cont;
1754   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1755   __ jcc(Assembler::equal, cont);
1756 
1757   // Save down any incoming oops and call into the runtime to halt for a GC
1758 
1759   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1760   save_or_restore_arguments(masm, stack_slots, total_in_args,
1761                             arg_save_area, map, in_regs, in_sig_bt);
1762 
1763   address the_pc = __ pc();
1764   oop_maps->add_gc_map( __ offset(), map);
1765   __ set_last_Java_frame(rsp, noreg, the_pc);
1766 
1767   __ block_comment("block_for_jni_critical");
1768   __ movptr(c_rarg0, r15_thread);
1769   __ mov(r12, rsp); // remember sp
1770   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1771   __ andptr(rsp, -16); // align stack as required by ABI
1772   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1773   __ mov(rsp, r12); // restore sp
1774   __ reinit_heapbase();
1775 
1776   __ reset_last_Java_frame(false);
1777 
1778   save_or_restore_arguments(masm, stack_slots, total_in_args,
1779                             arg_save_area, NULL, in_regs, in_sig_bt);
1780   __ bind(cont);
1781 #ifdef ASSERT
1782   if (StressCriticalJNINatives) {
1783     // Stress register saving
1784     OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1785     save_or_restore_arguments(masm, stack_slots, total_in_args,
1786                               arg_save_area, map, in_regs, in_sig_bt);
1787     // Destroy argument registers
1788     for (int i = 0; i < total_in_args - 1; i++) {
1789       if (in_regs[i].first()->is_Register()) {
1790         const Register reg = in_regs[i].first()->as_Register();
1791         __ xorptr(reg, reg);
1792       } else if (in_regs[i].first()->is_XMMRegister()) {
1793         __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1794       } else if (in_regs[i].first()->is_FloatRegister()) {
1795         ShouldNotReachHere();
1796       } else if (in_regs[i].first()->is_stack()) {
1797         // Nothing to do
1798       } else {
1799         ShouldNotReachHere();
1800       }
1801       if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1802         i++;
1803       }
1804     }
1805 
1806     save_or_restore_arguments(masm, stack_slots, total_in_args,
1807                               arg_save_area, NULL, in_regs, in_sig_bt);
1808   }
1809 #endif
1810 }
1811 
1812 // Unpack an array argument into a pointer to the body and the length
1813 // if the array is non-null, otherwise pass 0 for both.
1814 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1815   Register tmp_reg = rax;
1816   assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1817          "possible collision");
1818   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1819          "possible collision");
1820 
1821   __ block_comment("unpack_array_argument {");
1822 
1823   // Pass the length, ptr pair
1824   Label is_null, done;
1825   VMRegPair tmp;
1826   tmp.set_ptr(tmp_reg->as_VMReg());
1827   if (reg.first()->is_stack()) {
1828     // Load the arg up from the stack
1829     move_ptr(masm, reg, tmp);
1830     reg = tmp;
1831   }
1832   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1833   __ jccb(Assembler::equal, is_null);
1834   __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1835   move_ptr(masm, tmp, body_arg);
1836   // load the length relative to the body.
1837   __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1838                            arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1839   move32_64(masm, tmp, length_arg);
1840   __ jmpb(done);
1841   __ bind(is_null);
1842   // Pass zeros
1843   __ xorptr(tmp_reg, tmp_reg);
1844   move_ptr(masm, tmp, body_arg);
1845   move32_64(masm, tmp, length_arg);
1846   __ bind(done);
1847 
1848   __ block_comment("} unpack_array_argument");
1849 }
1850 
1851 
1852 // Different signatures may require very different orders for the move
1853 // to avoid clobbering other arguments.  There's no simple way to
1854 // order them safely.  Compute a safe order for issuing stores and
1855 // break any cycles in those stores.  This code is fairly general but
1856 // it's not necessary on the other platforms so we keep it in the
1857 // platform dependent code instead of moving it into a shared file.
1858 // (See bugs 7013347 & 7145024.)
1859 // Note that this code is specific to LP64.
1860 class ComputeMoveOrder: public StackObj {
1861   class MoveOperation: public ResourceObj {
1862     friend class ComputeMoveOrder;
1863    private:
1864     VMRegPair        _src;
1865     VMRegPair        _dst;
1866     int              _src_index;
1867     int              _dst_index;
1868     bool             _processed;
1869     MoveOperation*  _next;
1870     MoveOperation*  _prev;
1871 
1872     static int get_id(VMRegPair r) {
1873       return r.first()->value();
1874     }
1875 
1876    public:
1877     MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1878       _src(src)
1879     , _src_index(src_index)
1880     , _dst(dst)
1881     , _dst_index(dst_index)
1882     , _next(NULL)
1883     , _prev(NULL)
1884     , _processed(false) {
1885     }
1886 
1887     VMRegPair src() const              { return _src; }
1888     int src_id() const                 { return get_id(src()); }
1889     int src_index() const              { return _src_index; }
1890     VMRegPair dst() const              { return _dst; }
1891     void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1892     int dst_index() const              { return _dst_index; }
1893     int dst_id() const                 { return get_id(dst()); }
1894     MoveOperation* next() const       { return _next; }
1895     MoveOperation* prev() const       { return _prev; }
1896     void set_processed()               { _processed = true; }
1897     bool is_processed() const          { return _processed; }
1898 
1899     // insert
1900     void break_cycle(VMRegPair temp_register) {
1901       // create a new store following the last store
1902       // to move from the temp_register to the original
1903       MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1904 
1905       // break the cycle of links and insert new_store at the end
1906       // break the reverse link.
1907       MoveOperation* p = prev();
1908       assert(p->next() == this, "must be");
1909       _prev = NULL;
1910       p->_next = new_store;
1911       new_store->_prev = p;
1912 
1913       // change the original store to save it's value in the temp.
1914       set_dst(-1, temp_register);
1915     }
1916 
1917     void link(GrowableArray<MoveOperation*>& killer) {
1918       // link this store in front the store that it depends on
1919       MoveOperation* n = killer.at_grow(src_id(), NULL);
1920       if (n != NULL) {
1921         assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1922         _next = n;
1923         n->_prev = this;
1924       }
1925     }
1926   };
1927 
1928  private:
1929   GrowableArray<MoveOperation*> edges;
1930 
1931  public:
1932   ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1933                     BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1934     // Move operations where the dest is the stack can all be
1935     // scheduled first since they can't interfere with the other moves.
1936     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1937       if (in_sig_bt[i] == T_ARRAY) {
1938         c_arg--;
1939         if (out_regs[c_arg].first()->is_stack() &&
1940             out_regs[c_arg + 1].first()->is_stack()) {
1941           arg_order.push(i);
1942           arg_order.push(c_arg);
1943         } else {
1944           if (out_regs[c_arg].first()->is_stack() ||
1945               in_regs[i].first() == out_regs[c_arg].first()) {
1946             add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1947           } else {
1948             add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1949           }
1950         }
1951       } else if (in_sig_bt[i] == T_VOID) {
1952         arg_order.push(i);
1953         arg_order.push(c_arg);
1954       } else {
1955         if (out_regs[c_arg].first()->is_stack() ||
1956             in_regs[i].first() == out_regs[c_arg].first()) {
1957           arg_order.push(i);
1958           arg_order.push(c_arg);
1959         } else {
1960           add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1961         }
1962       }
1963     }
1964     // Break any cycles in the register moves and emit the in the
1965     // proper order.
1966     GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1967     for (int i = 0; i < stores->length(); i++) {
1968       arg_order.push(stores->at(i)->src_index());
1969       arg_order.push(stores->at(i)->dst_index());
1970     }
1971  }
1972 
1973   // Collected all the move operations
1974   void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1975     if (src.first() == dst.first()) return;
1976     edges.append(new MoveOperation(src_index, src, dst_index, dst));
1977   }
1978 
1979   // Walk the edges breaking cycles between moves.  The result list
1980   // can be walked in order to produce the proper set of loads
1981   GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1982     // Record which moves kill which values
1983     GrowableArray<MoveOperation*> killer;
1984     for (int i = 0; i < edges.length(); i++) {
1985       MoveOperation* s = edges.at(i);
1986       assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1987       killer.at_put_grow(s->dst_id(), s, NULL);
1988     }
1989     assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1990            "make sure temp isn't in the registers that are killed");
1991 
1992     // create links between loads and stores
1993     for (int i = 0; i < edges.length(); i++) {
1994       edges.at(i)->link(killer);
1995     }
1996 
1997     // at this point, all the move operations are chained together
1998     // in a doubly linked list.  Processing it backwards finds
1999     // the beginning of the chain, forwards finds the end.  If there's
2000     // a cycle it can be broken at any point,  so pick an edge and walk
2001     // backward until the list ends or we end where we started.
2002     GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
2003     for (int e = 0; e < edges.length(); e++) {
2004       MoveOperation* s = edges.at(e);
2005       if (!s->is_processed()) {
2006         MoveOperation* start = s;
2007         // search for the beginning of the chain or cycle
2008         while (start->prev() != NULL && start->prev() != s) {
2009           start = start->prev();
2010         }
2011         if (start->prev() == s) {
2012           start->break_cycle(temp_register);
2013         }
2014         // walk the chain forward inserting to store list
2015         while (start != NULL) {
2016           stores->append(start);
2017           start->set_processed();
2018           start = start->next();
2019         }
2020       }
2021     }
2022     return stores;
2023   }
2024 };
2025 
2026 static void verify_oop_args(MacroAssembler* masm,
2027                             const methodHandle& method,
2028                             const BasicType* sig_bt,
2029                             const VMRegPair* regs) {
2030   Register temp_reg = rbx;  // not part of any compiled calling seq
2031   if (VerifyOops) {
2032     for (int i = 0; i < method->size_of_parameters(); i++) {
2033       if (sig_bt[i] == T_OBJECT ||
2034           sig_bt[i] == T_ARRAY) {
2035         VMReg r = regs[i].first();
2036         assert(r->is_valid(), "bad oop arg");
2037         if (r->is_stack()) {
2038           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2039           __ verify_oop(temp_reg);
2040         } else {
2041           __ verify_oop(r->as_Register());
2042         }
2043       }
2044     }
2045   }
2046 }
2047 
2048 static void gen_special_dispatch(MacroAssembler* masm,
2049                                  const methodHandle& method,
2050                                  const BasicType* sig_bt,
2051                                  const VMRegPair* regs) {
2052   verify_oop_args(masm, method, sig_bt, regs);
2053   vmIntrinsics::ID iid = method->intrinsic_id();
2054 
2055   // Now write the args into the outgoing interpreter space
2056   bool     has_receiver   = false;
2057   Register receiver_reg   = noreg;
2058   int      member_arg_pos = -1;
2059   Register member_reg     = noreg;
2060   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
2061   if (ref_kind != 0) {
2062     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
2063     member_reg = rbx;  // known to be free at this point
2064     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
2065   } else if (iid == vmIntrinsics::_invokeBasic) {
2066     has_receiver = true;
2067   } else {
2068     fatal("unexpected intrinsic id %d", iid);
2069   }
2070 
2071   if (member_reg != noreg) {
2072     // Load the member_arg into register, if necessary.
2073     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
2074     VMReg r = regs[member_arg_pos].first();
2075     if (r->is_stack()) {
2076       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2077     } else {
2078       // no data motion is needed
2079       member_reg = r->as_Register();
2080     }
2081   }
2082 
2083   if (has_receiver) {
2084     // Make sure the receiver is loaded into a register.
2085     assert(method->size_of_parameters() > 0, "oob");
2086     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
2087     VMReg r = regs[0].first();
2088     assert(r->is_valid(), "bad receiver arg");
2089     if (r->is_stack()) {
2090       // Porting note:  This assumes that compiled calling conventions always
2091       // pass the receiver oop in a register.  If this is not true on some
2092       // platform, pick a temp and load the receiver from stack.
2093       fatal("receiver always in a register");
2094       receiver_reg = j_rarg0;  // known to be free at this point
2095       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
2096     } else {
2097       // no data motion is needed
2098       receiver_reg = r->as_Register();
2099     }
2100   }
2101 
2102   // Figure out which address we are really jumping to:
2103   MethodHandles::generate_method_handle_dispatch(masm, iid,
2104                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
2105 }
2106 
2107 // ---------------------------------------------------------------------------
2108 // Generate a native wrapper for a given method.  The method takes arguments
2109 // in the Java compiled code convention, marshals them to the native
2110 // convention (handlizes oops, etc), transitions to native, makes the call,
2111 // returns to java state (possibly blocking), unhandlizes any result and
2112 // returns.
2113 //
2114 // Critical native functions are a shorthand for the use of
2115 // GetPrimtiveArrayCritical and disallow the use of any other JNI
2116 // functions.  The wrapper is expected to unpack the arguments before
2117 // passing them to the callee and perform checks before and after the
2118 // native call to ensure that they GCLocker
2119 // lock_critical/unlock_critical semantics are followed.  Some other
2120 // parts of JNI setup are skipped like the tear down of the JNI handle
2121 // block and the check for pending exceptions it's impossible for them
2122 // to be thrown.
2123 //
2124 // They are roughly structured like this:
2125 //    if (GCLocker::needs_gc())
2126 //      SharedRuntime::block_for_jni_critical();
2127 //    tranistion to thread_in_native
2128 //    unpack arrray arguments and call native entry point
2129 //    check for safepoint in progress
2130 //    check if any thread suspend flags are set
2131 //      call into JVM and possible unlock the JNI critical
2132 //      if a GC was suppressed while in the critical native.
2133 //    transition back to thread_in_Java
2134 //    return to caller
2135 //
2136 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
2137                                                 const methodHandle& method,
2138                                                 int compile_id,
2139                                                 BasicType* in_sig_bt,
2140                                                 VMRegPair* in_regs,
2141                                                 BasicType ret_type) {
2142   if (method->is_method_handle_intrinsic()) {
2143     vmIntrinsics::ID iid = method->intrinsic_id();
2144     intptr_t start = (intptr_t)__ pc();
2145     int vep_offset = ((intptr_t)__ pc()) - start;
2146     gen_special_dispatch(masm,
2147                          method,
2148                          in_sig_bt,
2149                          in_regs);
2150     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
2151     __ flush();
2152     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
2153     return nmethod::new_native_nmethod(method,
2154                                        compile_id,
2155                                        masm->code(),
2156                                        vep_offset,
2157                                        frame_complete,
2158                                        stack_slots / VMRegImpl::slots_per_word,
2159                                        in_ByteSize(-1),
2160                                        in_ByteSize(-1),
2161                                        (OopMapSet*)NULL);
2162   }
2163   bool is_critical_native = true;
2164   address native_func = method->critical_native_function();
2165   if (native_func == NULL) {
2166     native_func = method->native_function();
2167     is_critical_native = false;
2168   }
2169   assert(native_func != NULL, "must have function");
2170 
2171   // An OopMap for lock (and class if static)
2172   OopMapSet *oop_maps = new OopMapSet();
2173   intptr_t start = (intptr_t)__ pc();
2174 
2175   // We have received a description of where all the java arg are located
2176   // on entry to the wrapper. We need to convert these args to where
2177   // the jni function will expect them. To figure out where they go
2178   // we convert the java signature to a C signature by inserting
2179   // the hidden arguments as arg[0] and possibly arg[1] (static method)
2180 
2181   const int total_in_args = method->size_of_parameters();
2182   int total_c_args = total_in_args;
2183   if (!is_critical_native) {
2184     total_c_args += 1;
2185     if (method->is_static()) {
2186       total_c_args++;
2187     }
2188   } else {
2189     for (int i = 0; i < total_in_args; i++) {
2190       if (in_sig_bt[i] == T_ARRAY) {
2191         total_c_args++;
2192       }
2193     }
2194   }
2195 
2196   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2197   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2198   BasicType* in_elem_bt = NULL;
2199 
2200   int argc = 0;
2201   if (!is_critical_native) {
2202     out_sig_bt[argc++] = T_ADDRESS;
2203     if (method->is_static()) {
2204       out_sig_bt[argc++] = T_OBJECT;
2205     }
2206 
2207     for (int i = 0; i < total_in_args ; i++ ) {
2208       out_sig_bt[argc++] = in_sig_bt[i];
2209     }
2210   } else {
2211     Thread* THREAD = Thread::current();
2212     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2213     SignatureStream ss(method->signature());
2214     for (int i = 0; i < total_in_args ; i++ ) {
2215       if (in_sig_bt[i] == T_ARRAY) {
2216         // Arrays are passed as int, elem* pair
2217         out_sig_bt[argc++] = T_INT;
2218         out_sig_bt[argc++] = T_ADDRESS;
2219         Symbol* atype = ss.as_symbol(CHECK_NULL);
2220         const char* at = atype->as_C_string();
2221         if (strlen(at) == 2) {
2222           assert(at[0] == '[', "must be");
2223           switch (at[1]) {
2224             case 'B': in_elem_bt[i]  = T_BYTE; break;
2225             case 'C': in_elem_bt[i]  = T_CHAR; break;
2226             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
2227             case 'F': in_elem_bt[i]  = T_FLOAT; break;
2228             case 'I': in_elem_bt[i]  = T_INT; break;
2229             case 'J': in_elem_bt[i]  = T_LONG; break;
2230             case 'S': in_elem_bt[i]  = T_SHORT; break;
2231             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
2232             default: ShouldNotReachHere();
2233           }
2234         }
2235       } else {
2236         out_sig_bt[argc++] = in_sig_bt[i];
2237         in_elem_bt[i] = T_VOID;
2238       }
2239       if (in_sig_bt[i] != T_VOID) {
2240         assert(in_sig_bt[i] == ss.type(), "must match");
2241         ss.next();
2242       }
2243     }
2244   }
2245 
2246   // Now figure out where the args must be stored and how much stack space
2247   // they require.
2248   int out_arg_slots;
2249   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2250 
2251   // Compute framesize for the wrapper.  We need to handlize all oops in
2252   // incoming registers
2253 
2254   // Calculate the total number of stack slots we will need.
2255 
2256   // First count the abi requirement plus all of the outgoing args
2257   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2258 
2259   // Now the space for the inbound oop handle area
2260   int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
2261   if (is_critical_native) {
2262     // Critical natives may have to call out so they need a save area
2263     // for register arguments.
2264     int double_slots = 0;
2265     int single_slots = 0;
2266     for ( int i = 0; i < total_in_args; i++) {
2267       if (in_regs[i].first()->is_Register()) {
2268         const Register reg = in_regs[i].first()->as_Register();
2269         switch (in_sig_bt[i]) {
2270           case T_BOOLEAN:
2271           case T_BYTE:
2272           case T_SHORT:
2273           case T_CHAR:
2274           case T_INT:  single_slots++; break;
2275           case T_ARRAY:  // specific to LP64 (7145024)
2276           case T_LONG: double_slots++; break;
2277           default:  ShouldNotReachHere();
2278         }
2279       } else if (in_regs[i].first()->is_XMMRegister()) {
2280         switch (in_sig_bt[i]) {
2281           case T_FLOAT:  single_slots++; break;
2282           case T_DOUBLE: double_slots++; break;
2283           default:  ShouldNotReachHere();
2284         }
2285       } else if (in_regs[i].first()->is_FloatRegister()) {
2286         ShouldNotReachHere();
2287       }
2288     }
2289     total_save_slots = double_slots * 2 + single_slots;
2290     // align the save area
2291     if (double_slots != 0) {
2292       stack_slots = align_up(stack_slots, 2);
2293     }
2294   }
2295 
2296   int oop_handle_offset = stack_slots;
2297   stack_slots += total_save_slots;
2298 
2299   // Now any space we need for handlizing a klass if static method
2300 
2301   int klass_slot_offset = 0;
2302   int klass_offset = -1;
2303   int lock_slot_offset = 0;
2304   bool is_static = false;
2305 
2306   if (method->is_static()) {
2307     klass_slot_offset = stack_slots;
2308     stack_slots += VMRegImpl::slots_per_word;
2309     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2310     is_static = true;
2311   }
2312 
2313   // Plus a lock if needed
2314 
2315   if (method->is_synchronized()) {
2316     lock_slot_offset = stack_slots;
2317     stack_slots += VMRegImpl::slots_per_word;
2318   }
2319 
2320   // Now a place (+2) to save return values or temp during shuffling
2321   // + 4 for return address (which we own) and saved rbp
2322   stack_slots += 6;
2323 
2324   // Ok The space we have allocated will look like:
2325   //
2326   //
2327   // FP-> |                     |
2328   //      |---------------------|
2329   //      | 2 slots for moves   |
2330   //      |---------------------|
2331   //      | lock box (if sync)  |
2332   //      |---------------------| <- lock_slot_offset
2333   //      | klass (if static)   |
2334   //      |---------------------| <- klass_slot_offset
2335   //      | oopHandle area      |
2336   //      |---------------------| <- oop_handle_offset (6 java arg registers)
2337   //      | outbound memory     |
2338   //      | based arguments     |
2339   //      |                     |
2340   //      |---------------------|
2341   //      |                     |
2342   // SP-> | out_preserved_slots |
2343   //
2344   //
2345 
2346 
2347   // Now compute actual number of stack words we need rounding to make
2348   // stack properly aligned.
2349   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
2350 
2351   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2352 
2353   // First thing make an ic check to see if we should even be here
2354 
2355   // We are free to use all registers as temps without saving them and
2356   // restoring them except rbp. rbp is the only callee save register
2357   // as far as the interpreter and the compiler(s) are concerned.
2358 
2359 
2360   const Register ic_reg = rax;
2361   const Register receiver = j_rarg0;
2362 
2363   Label hit;
2364   Label exception_pending;
2365 
2366   assert_different_registers(ic_reg, receiver, rscratch1);
2367   __ verify_oop(receiver);
2368   __ load_klass(rscratch1, receiver);
2369   __ cmpq(ic_reg, rscratch1);
2370   __ jcc(Assembler::equal, hit);
2371 
2372   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2373 
2374   // Verified entry point must be aligned
2375   __ align(8);
2376 
2377   __ bind(hit);
2378 
2379   int vep_offset = ((intptr_t)__ pc()) - start;
2380 
2381 #ifdef COMPILER1
2382   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2383   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2384     inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2385   }
2386 #endif // COMPILER1
2387 
2388   // The instruction at the verified entry point must be 5 bytes or longer
2389   // because it can be patched on the fly by make_non_entrant. The stack bang
2390   // instruction fits that requirement.
2391 
2392   // Generate stack overflow check
2393 
2394   if (UseStackBanging) {
2395     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2396   } else {
2397     // need a 5 byte instruction to allow MT safe patching to non-entrant
2398     __ fat_nop();
2399   }
2400 
2401   // Generate a new frame for the wrapper.
2402   __ enter();
2403   // -2 because return address is already present and so is saved rbp
2404   __ subptr(rsp, stack_size - 2*wordSize);
2405 
2406   // Frame is now completed as far as size and linkage.
2407   int frame_complete = ((intptr_t)__ pc()) - start;
2408 
2409     if (UseRTMLocking) {
2410       // Abort RTM transaction before calling JNI
2411       // because critical section will be large and will be
2412       // aborted anyway. Also nmethod could be deoptimized.
2413       __ xabort(0);
2414     }
2415 
2416 #ifdef ASSERT
2417     {
2418       Label L;
2419       __ mov(rax, rsp);
2420       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2421       __ cmpptr(rax, rsp);
2422       __ jcc(Assembler::equal, L);
2423       __ stop("improperly aligned stack");
2424       __ bind(L);
2425     }
2426 #endif /* ASSERT */
2427 
2428 
2429   // We use r14 as the oop handle for the receiver/klass
2430   // It is callee save so it survives the call to native
2431 
2432   const Register oop_handle_reg = r14;
2433 
2434   if (is_critical_native) {
2435     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2436                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2437   }
2438 
2439   //
2440   // We immediately shuffle the arguments so that any vm call we have to
2441   // make from here on out (sync slow path, jvmti, etc.) we will have
2442   // captured the oops from our caller and have a valid oopMap for
2443   // them.
2444 
2445   // -----------------
2446   // The Grand Shuffle
2447 
2448   // The Java calling convention is either equal (linux) or denser (win64) than the
2449   // c calling convention. However the because of the jni_env argument the c calling
2450   // convention always has at least one more (and two for static) arguments than Java.
2451   // Therefore if we move the args from java -> c backwards then we will never have
2452   // a register->register conflict and we don't have to build a dependency graph
2453   // and figure out how to break any cycles.
2454   //
2455 
2456   // Record esp-based slot for receiver on stack for non-static methods
2457   int receiver_offset = -1;
2458 
2459   // This is a trick. We double the stack slots so we can claim
2460   // the oops in the caller's frame. Since we are sure to have
2461   // more args than the caller doubling is enough to make
2462   // sure we can capture all the incoming oop args from the
2463   // caller.
2464   //
2465   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2466 
2467   // Mark location of rbp (someday)
2468   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2469 
2470   // Use eax, ebx as temporaries during any memory-memory moves we have to do
2471   // All inbound args are referenced based on rbp and all outbound args via rsp.
2472 
2473 
2474 #ifdef ASSERT
2475   bool reg_destroyed[RegisterImpl::number_of_registers];
2476   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2477   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2478     reg_destroyed[r] = false;
2479   }
2480   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2481     freg_destroyed[f] = false;
2482   }
2483 
2484 #endif /* ASSERT */
2485 
2486   // This may iterate in two different directions depending on the
2487   // kind of native it is.  The reason is that for regular JNI natives
2488   // the incoming and outgoing registers are offset upwards and for
2489   // critical natives they are offset down.
2490   GrowableArray<int> arg_order(2 * total_in_args);
2491   VMRegPair tmp_vmreg;
2492   tmp_vmreg.set1(rbx->as_VMReg());
2493 
2494   if (!is_critical_native) {
2495     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2496       arg_order.push(i);
2497       arg_order.push(c_arg);
2498     }
2499   } else {
2500     // Compute a valid move order, using tmp_vmreg to break any cycles
2501     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2502   }
2503 
2504   int temploc = -1;
2505   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2506     int i = arg_order.at(ai);
2507     int c_arg = arg_order.at(ai + 1);
2508     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2509     if (c_arg == -1) {
2510       assert(is_critical_native, "should only be required for critical natives");
2511       // This arg needs to be moved to a temporary
2512       __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2513       in_regs[i] = tmp_vmreg;
2514       temploc = i;
2515       continue;
2516     } else if (i == -1) {
2517       assert(is_critical_native, "should only be required for critical natives");
2518       // Read from the temporary location
2519       assert(temploc != -1, "must be valid");
2520       i = temploc;
2521       temploc = -1;
2522     }
2523 #ifdef ASSERT
2524     if (in_regs[i].first()->is_Register()) {
2525       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2526     } else if (in_regs[i].first()->is_XMMRegister()) {
2527       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2528     }
2529     if (out_regs[c_arg].first()->is_Register()) {
2530       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2531     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2532       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2533     }
2534 #endif /* ASSERT */
2535     switch (in_sig_bt[i]) {
2536       case T_ARRAY:
2537         if (is_critical_native) {
2538           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2539           c_arg++;
2540 #ifdef ASSERT
2541           if (out_regs[c_arg].first()->is_Register()) {
2542             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2543           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2544             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2545           }
2546 #endif
2547           break;
2548         }
2549       case T_OBJECT:
2550         assert(!is_critical_native, "no oop arguments");
2551         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2552                     ((i == 0) && (!is_static)),
2553                     &receiver_offset);
2554         break;
2555       case T_VOID:
2556         break;
2557 
2558       case T_FLOAT:
2559         float_move(masm, in_regs[i], out_regs[c_arg]);
2560           break;
2561 
2562       case T_DOUBLE:
2563         assert( i + 1 < total_in_args &&
2564                 in_sig_bt[i + 1] == T_VOID &&
2565                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2566         double_move(masm, in_regs[i], out_regs[c_arg]);
2567         break;
2568 
2569       case T_LONG :
2570         long_move(masm, in_regs[i], out_regs[c_arg]);
2571         break;
2572 
2573       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2574 
2575       default:
2576         move32_64(masm, in_regs[i], out_regs[c_arg]);
2577     }
2578   }
2579 
2580   int c_arg;
2581 
2582   // Pre-load a static method's oop into r14.  Used both by locking code and
2583   // the normal JNI call code.
2584   if (!is_critical_native) {
2585     // point c_arg at the first arg that is already loaded in case we
2586     // need to spill before we call out
2587     c_arg = total_c_args - total_in_args;
2588 
2589     if (method->is_static()) {
2590 
2591       //  load oop into a register
2592       __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2593 
2594       // Now handlize the static class mirror it's known not-null.
2595       __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2596       map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2597 
2598       // Now get the handle
2599       __ lea(oop_handle_reg, Address(rsp, klass_offset));
2600       // store the klass handle as second argument
2601       __ movptr(c_rarg1, oop_handle_reg);
2602       // and protect the arg if we must spill
2603       c_arg--;
2604     }
2605   } else {
2606     // For JNI critical methods we need to save all registers in save_args.
2607     c_arg = 0;
2608   }
2609 
2610   // Change state to native (we save the return address in the thread, since it might not
2611   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
2612   // points into the right code segment. It does not have to be the correct return pc.
2613   // We use the same pc/oopMap repeatedly when we call out
2614 
2615   intptr_t the_pc = (intptr_t) __ pc();
2616   oop_maps->add_gc_map(the_pc - start, map);
2617 
2618   __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2619 
2620 
2621   // We have all of the arguments setup at this point. We must not touch any register
2622   // argument registers at this point (what if we save/restore them there are no oop?
2623 
2624   {
2625     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2626     // protect the args we've loaded
2627     save_args(masm, total_c_args, c_arg, out_regs);
2628     __ mov_metadata(c_rarg1, method());
2629     __ call_VM_leaf(
2630       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2631       r15_thread, c_rarg1);
2632     restore_args(masm, total_c_args, c_arg, out_regs);
2633   }
2634 
2635   // RedefineClasses() tracing support for obsolete method entry
2636   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2637     // protect the args we've loaded
2638     save_args(masm, total_c_args, c_arg, out_regs);
2639     __ mov_metadata(c_rarg1, method());
2640     __ call_VM_leaf(
2641       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2642       r15_thread, c_rarg1);
2643     restore_args(masm, total_c_args, c_arg, out_regs);
2644   }
2645 
2646   // Lock a synchronized method
2647 
2648   // Register definitions used by locking and unlocking
2649 
2650   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2651   const Register obj_reg  = rbx;  // Will contain the oop
2652   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2653   const Register old_hdr  = r13;  // value of old header at unlock time
2654 
2655   Label slow_path_lock;
2656   Label lock_done;
2657 
2658   if (method->is_synchronized()) {
2659     assert(!is_critical_native, "unhandled");
2660 
2661 
2662     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2663 
2664     // Get the handle (the 2nd argument)
2665     __ mov(oop_handle_reg, c_rarg1);
2666 
2667     // Get address of the box
2668 
2669     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2670 
2671     // Load the oop from the handle
2672     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2673 
2674     if (UseBiasedLocking) {
2675       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2676     }
2677 
2678     // Load immediate 1 into swap_reg %rax
2679     __ movl(swap_reg, 1);
2680 
2681     // Load (object->mark() | 1) into swap_reg %rax
2682     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2683 
2684     // Save (object->mark() | 1) into BasicLock's displaced header
2685     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2686 
2687     if (os::is_MP()) {
2688       __ lock();
2689     }
2690 
2691     // src -> dest iff dest == rax else rax <- dest
2692     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2693     __ jcc(Assembler::equal, lock_done);
2694 
2695     // Hmm should this move to the slow path code area???
2696 
2697     // Test if the oopMark is an obvious stack pointer, i.e.,
2698     //  1) (mark & 3) == 0, and
2699     //  2) rsp <= mark < mark + os::pagesize()
2700     // These 3 tests can be done by evaluating the following
2701     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2702     // assuming both stack pointer and pagesize have their
2703     // least significant 2 bits clear.
2704     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2705 
2706     __ subptr(swap_reg, rsp);
2707     __ andptr(swap_reg, 3 - os::vm_page_size());
2708 
2709     // Save the test result, for recursive case, the result is zero
2710     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2711     __ jcc(Assembler::notEqual, slow_path_lock);
2712 
2713     // Slow path will re-enter here
2714 
2715     __ bind(lock_done);
2716   }
2717 
2718 
2719   // Finally just about ready to make the JNI call
2720 
2721 
2722   // get JNIEnv* which is first argument to native
2723   if (!is_critical_native) {
2724     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2725   }
2726 
2727   // Now set thread in native
2728   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2729 
2730   __ call(RuntimeAddress(native_func));
2731 
2732   // Verify or restore cpu control state after JNI call
2733   __ restore_cpu_control_state_after_jni();
2734 
2735   // Unpack native results.
2736   switch (ret_type) {
2737   case T_BOOLEAN: __ c2bool(rax);            break;
2738   case T_CHAR   : __ movzwl(rax, rax);      break;
2739   case T_BYTE   : __ sign_extend_byte (rax); break;
2740   case T_SHORT  : __ sign_extend_short(rax); break;
2741   case T_INT    : /* nothing to do */        break;
2742   case T_DOUBLE :
2743   case T_FLOAT  :
2744     // Result is in xmm0 we'll save as needed
2745     break;
2746   case T_ARRAY:                 // Really a handle
2747   case T_OBJECT:                // Really a handle
2748       break; // can't de-handlize until after safepoint check
2749   case T_VOID: break;
2750   case T_LONG: break;
2751   default       : ShouldNotReachHere();
2752   }
2753 
2754   // Switch thread to "native transition" state before reading the synchronization state.
2755   // This additional state is necessary because reading and testing the synchronization
2756   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2757   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2758   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2759   //     Thread A is resumed to finish this native method, but doesn't block here since it
2760   //     didn't see any synchronization is progress, and escapes.
2761   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2762 
2763   if(os::is_MP()) {
2764     if (UseMembar) {
2765       // Force this write out before the read below
2766       __ membar(Assembler::Membar_mask_bits(
2767            Assembler::LoadLoad | Assembler::LoadStore |
2768            Assembler::StoreLoad | Assembler::StoreStore));
2769     } else {
2770       // Write serialization page so VM thread can do a pseudo remote membar.
2771       // We use the current thread pointer to calculate a thread specific
2772       // offset to write to within the page. This minimizes bus traffic
2773       // due to cache line collision.
2774       __ serialize_memory(r15_thread, rcx);
2775     }
2776   }
2777 
2778   Label after_transition;
2779 
2780   // check for safepoint operation in progress and/or pending suspend requests
2781   {
2782     Label Continue;
2783 
2784     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2785              SafepointSynchronize::_not_synchronized);
2786 
2787     Label L;
2788     __ jcc(Assembler::notEqual, L);
2789     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2790     __ jcc(Assembler::equal, Continue);
2791     __ bind(L);
2792 
2793     // Don't use call_VM as it will see a possible pending exception and forward it
2794     // and never return here preventing us from clearing _last_native_pc down below.
2795     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2796     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2797     // by hand.
2798     //
2799     __ vzeroupper();
2800     save_native_result(masm, ret_type, stack_slots);
2801     __ mov(c_rarg0, r15_thread);
2802     __ mov(r12, rsp); // remember sp
2803     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2804     __ andptr(rsp, -16); // align stack as required by ABI
2805     if (!is_critical_native) {
2806       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2807     } else {
2808       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2809     }
2810     __ mov(rsp, r12); // restore sp
2811     __ reinit_heapbase();
2812     // Restore any method result value
2813     restore_native_result(masm, ret_type, stack_slots);
2814 
2815     if (is_critical_native) {
2816       // The call above performed the transition to thread_in_Java so
2817       // skip the transition logic below.
2818       __ jmpb(after_transition);
2819     }
2820 
2821     __ bind(Continue);
2822   }
2823 
2824   // change thread state
2825   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2826   __ bind(after_transition);
2827 
2828   Label reguard;
2829   Label reguard_done;
2830   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2831   __ jcc(Assembler::equal, reguard);
2832   __ bind(reguard_done);
2833 
2834   // native result if any is live
2835 
2836   // Unlock
2837   Label unlock_done;
2838   Label slow_path_unlock;
2839   if (method->is_synchronized()) {
2840 
2841     // Get locked oop from the handle we passed to jni
2842     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2843 
2844     Label done;
2845 
2846     if (UseBiasedLocking) {
2847       __ biased_locking_exit(obj_reg, old_hdr, done);
2848     }
2849 
2850     // Simple recursive lock?
2851 
2852     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2853     __ jcc(Assembler::equal, done);
2854 
2855     // Must save rax if if it is live now because cmpxchg must use it
2856     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2857       save_native_result(masm, ret_type, stack_slots);
2858     }
2859 
2860 
2861     // get address of the stack lock
2862     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2863     //  get old displaced header
2864     __ movptr(old_hdr, Address(rax, 0));
2865 
2866     // Atomic swap old header if oop still contains the stack lock
2867     if (os::is_MP()) {
2868       __ lock();
2869     }
2870     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2871     __ jcc(Assembler::notEqual, slow_path_unlock);
2872 
2873     // slow path re-enters here
2874     __ bind(unlock_done);
2875     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2876       restore_native_result(masm, ret_type, stack_slots);
2877     }
2878 
2879     __ bind(done);
2880 
2881   }
2882   {
2883     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2884     save_native_result(masm, ret_type, stack_slots);
2885     __ mov_metadata(c_rarg1, method());
2886     __ call_VM_leaf(
2887          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2888          r15_thread, c_rarg1);
2889     restore_native_result(masm, ret_type, stack_slots);
2890   }
2891 
2892   __ reset_last_Java_frame(false);
2893 
2894   // Unbox oop result, e.g. JNIHandles::resolve value.
2895   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2896     __ resolve_jobject(rax /* value */,
2897                        r15_thread /* thread */,
2898                        rcx /* tmp */);
2899   }
2900 
2901   if (CheckJNICalls) {
2902     // clear_pending_jni_exception_check
2903     __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2904   }
2905 
2906   if (!is_critical_native) {
2907     // reset handle block
2908     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2909     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2910   }
2911 
2912   // pop our frame
2913 
2914   __ leave();
2915 
2916   if (!is_critical_native) {
2917     // Any exception pending?
2918     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2919     __ jcc(Assembler::notEqual, exception_pending);
2920   }
2921 
2922   // Return
2923 
2924   __ ret(0);
2925 
2926   // Unexpected paths are out of line and go here
2927 
2928   if (!is_critical_native) {
2929     // forward the exception
2930     __ bind(exception_pending);
2931 
2932     // and forward the exception
2933     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2934   }
2935 
2936   // Slow path locking & unlocking
2937   if (method->is_synchronized()) {
2938 
2939     // BEGIN Slow path lock
2940     __ bind(slow_path_lock);
2941 
2942     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2943     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2944 
2945     // protect the args we've loaded
2946     save_args(masm, total_c_args, c_arg, out_regs);
2947 
2948     __ mov(c_rarg0, obj_reg);
2949     __ mov(c_rarg1, lock_reg);
2950     __ mov(c_rarg2, r15_thread);
2951 
2952     // Not a leaf but we have last_Java_frame setup as we want
2953     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2954     restore_args(masm, total_c_args, c_arg, out_regs);
2955 
2956 #ifdef ASSERT
2957     { Label L;
2958     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2959     __ jcc(Assembler::equal, L);
2960     __ stop("no pending exception allowed on exit from monitorenter");
2961     __ bind(L);
2962     }
2963 #endif
2964     __ jmp(lock_done);
2965 
2966     // END Slow path lock
2967 
2968     // BEGIN Slow path unlock
2969     __ bind(slow_path_unlock);
2970 
2971     // If we haven't already saved the native result we must save it now as xmm registers
2972     // are still exposed.
2973     __ vzeroupper();
2974     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2975       save_native_result(masm, ret_type, stack_slots);
2976     }
2977 
2978     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2979 
2980     __ mov(c_rarg0, obj_reg);
2981     __ mov(c_rarg2, r15_thread);
2982     __ mov(r12, rsp); // remember sp
2983     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2984     __ andptr(rsp, -16); // align stack as required by ABI
2985 
2986     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2987     // NOTE that obj_reg == rbx currently
2988     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2989     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2990 
2991     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2992     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2993     __ mov(rsp, r12); // restore sp
2994     __ reinit_heapbase();
2995 #ifdef ASSERT
2996     {
2997       Label L;
2998       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2999       __ jcc(Assembler::equal, L);
3000       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
3001       __ bind(L);
3002     }
3003 #endif /* ASSERT */
3004 
3005     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
3006 
3007     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
3008       restore_native_result(masm, ret_type, stack_slots);
3009     }
3010     __ jmp(unlock_done);
3011 
3012     // END Slow path unlock
3013 
3014   } // synchronized
3015 
3016   // SLOW PATH Reguard the stack if needed
3017 
3018   __ bind(reguard);
3019   __ vzeroupper();
3020   save_native_result(masm, ret_type, stack_slots);
3021   __ mov(r12, rsp); // remember sp
3022   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3023   __ andptr(rsp, -16); // align stack as required by ABI
3024   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
3025   __ mov(rsp, r12); // restore sp
3026   __ reinit_heapbase();
3027   restore_native_result(masm, ret_type, stack_slots);
3028   // and continue
3029   __ jmp(reguard_done);
3030 
3031 
3032 
3033   __ flush();
3034 
3035   nmethod *nm = nmethod::new_native_nmethod(method,
3036                                             compile_id,
3037                                             masm->code(),
3038                                             vep_offset,
3039                                             frame_complete,
3040                                             stack_slots / VMRegImpl::slots_per_word,
3041                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
3042                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
3043                                             oop_maps);
3044 
3045   if (is_critical_native) {
3046     nm->set_lazy_critical_native(true);
3047   }
3048 
3049   return nm;
3050 
3051 }
3052 
3053 // this function returns the adjust size (in number of words) to a c2i adapter
3054 // activation for use during deoptimization
3055 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3056   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3057 }
3058 
3059 
3060 uint SharedRuntime::out_preserve_stack_slots() {
3061   return 0;
3062 }
3063 
3064 //------------------------------generate_deopt_blob----------------------------
3065 void SharedRuntime::generate_deopt_blob() {
3066   // Allocate space for the code
3067   ResourceMark rm;
3068   // Setup code generation tools
3069   int pad = 0;
3070 #if INCLUDE_JVMCI
3071   if (EnableJVMCI || UseAOT) {
3072     pad += 512; // Increase the buffer size when compiling for JVMCI
3073   }
3074 #endif
3075   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
3076   MacroAssembler* masm = new MacroAssembler(&buffer);
3077   int frame_size_in_words;
3078   OopMap* map = NULL;
3079   OopMapSet *oop_maps = new OopMapSet();
3080 
3081   // -------------
3082   // This code enters when returning to a de-optimized nmethod.  A return
3083   // address has been pushed on the the stack, and return values are in
3084   // registers.
3085   // If we are doing a normal deopt then we were called from the patched
3086   // nmethod from the point we returned to the nmethod. So the return
3087   // address on the stack is wrong by NativeCall::instruction_size
3088   // We will adjust the value so it looks like we have the original return
3089   // address on the stack (like when we eagerly deoptimized).
3090   // In the case of an exception pending when deoptimizing, we enter
3091   // with a return address on the stack that points after the call we patched
3092   // into the exception handler. We have the following register state from,
3093   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3094   //    rax: exception oop
3095   //    rbx: exception handler
3096   //    rdx: throwing pc
3097   // So in this case we simply jam rdx into the useless return address and
3098   // the stack looks just like we want.
3099   //
3100   // At this point we need to de-opt.  We save the argument return
3101   // registers.  We call the first C routine, fetch_unroll_info().  This
3102   // routine captures the return values and returns a structure which
3103   // describes the current frame size and the sizes of all replacement frames.
3104   // The current frame is compiled code and may contain many inlined
3105   // functions, each with their own JVM state.  We pop the current frame, then
3106   // push all the new frames.  Then we call the C routine unpack_frames() to
3107   // populate these frames.  Finally unpack_frames() returns us the new target
3108   // address.  Notice that callee-save registers are BLOWN here; they have
3109   // already been captured in the vframeArray at the time the return PC was
3110   // patched.
3111   address start = __ pc();
3112   Label cont;
3113 
3114   // Prolog for non exception case!
3115 
3116   // Save everything in sight.
3117   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3118 
3119   // Normal deoptimization.  Save exec mode for unpack_frames.
3120   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3121   __ jmp(cont);
3122 
3123   int reexecute_offset = __ pc() - start;
3124 #if INCLUDE_JVMCI && !defined(COMPILER1)
3125   if (EnableJVMCI && UseJVMCICompiler) {
3126     // JVMCI does not use this kind of deoptimization
3127     __ should_not_reach_here();
3128   }
3129 #endif
3130 
3131   // Reexecute case
3132   // return address is the pc describes what bci to do re-execute at
3133 
3134   // No need to update map as each call to save_live_registers will produce identical oopmap
3135   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3136 
3137   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3138   __ jmp(cont);
3139 
3140 #if INCLUDE_JVMCI
3141   Label after_fetch_unroll_info_call;
3142   int implicit_exception_uncommon_trap_offset = 0;
3143   int uncommon_trap_offset = 0;
3144 
3145   if (EnableJVMCI || UseAOT) {
3146     implicit_exception_uncommon_trap_offset = __ pc() - start;
3147 
3148     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
3149     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
3150 
3151     uncommon_trap_offset = __ pc() - start;
3152 
3153     // Save everything in sight.
3154     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3155     // fetch_unroll_info needs to call last_java_frame()
3156     __ set_last_Java_frame(noreg, noreg, NULL);
3157 
3158     __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
3159     __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
3160 
3161     __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
3162     __ mov(c_rarg0, r15_thread);
3163     __ movl(c_rarg2, r14); // exec mode
3164     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3165     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
3166 
3167     __ reset_last_Java_frame(false);
3168 
3169     __ jmp(after_fetch_unroll_info_call);
3170   } // EnableJVMCI
3171 #endif // INCLUDE_JVMCI
3172 
3173   int exception_offset = __ pc() - start;
3174 
3175   // Prolog for exception case
3176 
3177   // all registers are dead at this entry point, except for rax, and
3178   // rdx which contain the exception oop and exception pc
3179   // respectively.  Set them in TLS and fall thru to the
3180   // unpack_with_exception_in_tls entry point.
3181 
3182   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3183   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3184 
3185   int exception_in_tls_offset = __ pc() - start;
3186 
3187   // new implementation because exception oop is now passed in JavaThread
3188 
3189   // Prolog for exception case
3190   // All registers must be preserved because they might be used by LinearScan
3191   // Exceptiop oop and throwing PC are passed in JavaThread
3192   // tos: stack at point of call to method that threw the exception (i.e. only
3193   // args are on the stack, no return address)
3194 
3195   // make room on stack for the return address
3196   // It will be patched later with the throwing pc. The correct value is not
3197   // available now because loading it from memory would destroy registers.
3198   __ push(0);
3199 
3200   // Save everything in sight.
3201   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3202 
3203   // Now it is safe to overwrite any register
3204 
3205   // Deopt during an exception.  Save exec mode for unpack_frames.
3206   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3207 
3208   // load throwing pc from JavaThread and patch it as the return address
3209   // of the current frame. Then clear the field in JavaThread
3210 
3211   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3212   __ movptr(Address(rbp, wordSize), rdx);
3213   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3214 
3215 #ifdef ASSERT
3216   // verify that there is really an exception oop in JavaThread
3217   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3218   __ verify_oop(rax);
3219 
3220   // verify that there is no pending exception
3221   Label no_pending_exception;
3222   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3223   __ testptr(rax, rax);
3224   __ jcc(Assembler::zero, no_pending_exception);
3225   __ stop("must not have pending exception here");
3226   __ bind(no_pending_exception);
3227 #endif
3228 
3229   __ bind(cont);
3230 
3231   // Call C code.  Need thread and this frame, but NOT official VM entry
3232   // crud.  We cannot block on this call, no GC can happen.
3233   //
3234   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3235 
3236   // fetch_unroll_info needs to call last_java_frame().
3237 
3238   __ set_last_Java_frame(noreg, noreg, NULL);
3239 #ifdef ASSERT
3240   { Label L;
3241     __ cmpptr(Address(r15_thread,
3242                     JavaThread::last_Java_fp_offset()),
3243             (int32_t)0);
3244     __ jcc(Assembler::equal, L);
3245     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3246     __ bind(L);
3247   }
3248 #endif // ASSERT
3249   __ mov(c_rarg0, r15_thread);
3250   __ movl(c_rarg1, r14); // exec_mode
3251   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3252 
3253   // Need to have an oopmap that tells fetch_unroll_info where to
3254   // find any register it might need.
3255   oop_maps->add_gc_map(__ pc() - start, map);
3256 
3257   __ reset_last_Java_frame(false);
3258 
3259 #if INCLUDE_JVMCI
3260   if (EnableJVMCI || UseAOT) {
3261     __ bind(after_fetch_unroll_info_call);
3262   }
3263 #endif
3264 
3265   // Load UnrollBlock* into rdi
3266   __ mov(rdi, rax);
3267 
3268   __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
3269    Label noException;
3270   __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
3271   __ jcc(Assembler::notEqual, noException);
3272   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3273   // QQQ this is useless it was NULL above
3274   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3275   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3276   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3277 
3278   __ verify_oop(rax);
3279 
3280   // Overwrite the result registers with the exception results.
3281   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3282   // I think this is useless
3283   __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3284 
3285   __ bind(noException);
3286 
3287   // Only register save data is on the stack.
3288   // Now restore the result registers.  Everything else is either dead
3289   // or captured in the vframeArray.
3290   RegisterSaver::restore_result_registers(masm);
3291 
3292   // All of the register save area has been popped of the stack. Only the
3293   // return address remains.
3294 
3295   // Pop all the frames we must move/replace.
3296   //
3297   // Frame picture (youngest to oldest)
3298   // 1: self-frame (no frame link)
3299   // 2: deopting frame  (no frame link)
3300   // 3: caller of deopting frame (could be compiled/interpreted).
3301   //
3302   // Note: by leaving the return address of self-frame on the stack
3303   // and using the size of frame 2 to adjust the stack
3304   // when we are done the return to frame 3 will still be on the stack.
3305 
3306   // Pop deoptimized frame
3307   __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3308   __ addptr(rsp, rcx);
3309 
3310   // rsp should be pointing at the return address to the caller (3)
3311 
3312   // Pick up the initial fp we should save
3313   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3314   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3315 
3316 #ifdef ASSERT
3317   // Compilers generate code that bang the stack by as much as the
3318   // interpreter would need. So this stack banging should never
3319   // trigger a fault. Verify that it does not on non product builds.
3320   if (UseStackBanging) {
3321     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3322     __ bang_stack_size(rbx, rcx);
3323   }
3324 #endif
3325 
3326   // Load address of array of frame pcs into rcx
3327   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3328 
3329   // Trash the old pc
3330   __ addptr(rsp, wordSize);
3331 
3332   // Load address of array of frame sizes into rsi
3333   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3334 
3335   // Load counter into rdx
3336   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3337 
3338   // Now adjust the caller's stack to make up for the extra locals
3339   // but record the original sp so that we can save it in the skeletal interpreter
3340   // frame and the stack walking of interpreter_sender will get the unextended sp
3341   // value and not the "real" sp value.
3342 
3343   const Register sender_sp = r8;
3344 
3345   __ mov(sender_sp, rsp);
3346   __ movl(rbx, Address(rdi,
3347                        Deoptimization::UnrollBlock::
3348                        caller_adjustment_offset_in_bytes()));
3349   __ subptr(rsp, rbx);
3350 
3351   // Push interpreter frames in a loop
3352   Label loop;
3353   __ bind(loop);
3354   __ movptr(rbx, Address(rsi, 0));      // Load frame size
3355   __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
3356   __ pushptr(Address(rcx, 0));          // Save return address
3357   __ enter();                           // Save old & set new ebp
3358   __ subptr(rsp, rbx);                  // Prolog
3359   // This value is corrected by layout_activation_impl
3360   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3361   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3362   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
3363   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
3364   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
3365   __ decrementl(rdx);                   // Decrement counter
3366   __ jcc(Assembler::notZero, loop);
3367   __ pushptr(Address(rcx, 0));          // Save final return address
3368 
3369   // Re-push self-frame
3370   __ enter();                           // Save old & set new ebp
3371 
3372   // Allocate a full sized register save area.
3373   // Return address and rbp are in place, so we allocate two less words.
3374   __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3375 
3376   // Restore frame locals after moving the frame
3377   __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3378   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3379 
3380   // Call C code.  Need thread but NOT official VM entry
3381   // crud.  We cannot block on this call, no GC can happen.  Call should
3382   // restore return values to their stack-slots with the new SP.
3383   //
3384   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3385 
3386   // Use rbp because the frames look interpreted now
3387   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3388   // Don't need the precise return PC here, just precise enough to point into this code blob.
3389   address the_pc = __ pc();
3390   __ set_last_Java_frame(noreg, rbp, the_pc);
3391 
3392   __ andptr(rsp, -(StackAlignmentInBytes));  // Fix stack alignment as required by ABI
3393   __ mov(c_rarg0, r15_thread);
3394   __ movl(c_rarg1, r14); // second arg: exec_mode
3395   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3396   // Revert SP alignment after call since we're going to do some SP relative addressing below
3397   __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3398 
3399   // Set an oopmap for the call site
3400   // Use the same PC we used for the last java frame
3401   oop_maps->add_gc_map(the_pc - start,
3402                        new OopMap( frame_size_in_words, 0 ));
3403 
3404   // Clear fp AND pc
3405   __ reset_last_Java_frame(true);
3406 
3407   // Collect return values
3408   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3409   __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3410   // I think this is useless (throwing pc?)
3411   __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3412 
3413   // Pop self-frame.
3414   __ leave();                           // Epilog
3415 
3416   // Jump to interpreter
3417   __ ret(0);
3418 
3419   // Make sure all code is generated
3420   masm->flush();
3421 
3422   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3423   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3424 #if INCLUDE_JVMCI
3425   if (EnableJVMCI || UseAOT) {
3426     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3427     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3428   }
3429 #endif
3430 }
3431 
3432 #ifdef COMPILER2
3433 //------------------------------generate_uncommon_trap_blob--------------------
3434 void SharedRuntime::generate_uncommon_trap_blob() {
3435   // Allocate space for the code
3436   ResourceMark rm;
3437   // Setup code generation tools
3438   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3439   MacroAssembler* masm = new MacroAssembler(&buffer);
3440 
3441   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3442 
3443   address start = __ pc();
3444 
3445   if (UseRTMLocking) {
3446     // Abort RTM transaction before possible nmethod deoptimization.
3447     __ xabort(0);
3448   }
3449 
3450   // Push self-frame.  We get here with a return address on the
3451   // stack, so rsp is 8-byte aligned until we allocate our frame.
3452   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
3453 
3454   // No callee saved registers. rbp is assumed implicitly saved
3455   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3456 
3457   // compiler left unloaded_class_index in j_rarg0 move to where the
3458   // runtime expects it.
3459   __ movl(c_rarg1, j_rarg0);
3460 
3461   __ set_last_Java_frame(noreg, noreg, NULL);
3462 
3463   // Call C code.  Need thread but NOT official VM entry
3464   // crud.  We cannot block on this call, no GC can happen.  Call should
3465   // capture callee-saved registers as well as return values.
3466   // Thread is in rdi already.
3467   //
3468   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3469 
3470   __ mov(c_rarg0, r15_thread);
3471   __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3472   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3473 
3474   // Set an oopmap for the call site
3475   OopMapSet* oop_maps = new OopMapSet();
3476   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3477 
3478   // location of rbp is known implicitly by the frame sender code
3479 
3480   oop_maps->add_gc_map(__ pc() - start, map);
3481 
3482   __ reset_last_Java_frame(false);
3483 
3484   // Load UnrollBlock* into rdi
3485   __ mov(rdi, rax);
3486 
3487 #ifdef ASSERT
3488   { Label L;
3489     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3490             (int32_t)Deoptimization::Unpack_uncommon_trap);
3491     __ jcc(Assembler::equal, L);
3492     __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3493     __ bind(L);
3494   }
3495 #endif
3496 
3497   // Pop all the frames we must move/replace.
3498   //
3499   // Frame picture (youngest to oldest)
3500   // 1: self-frame (no frame link)
3501   // 2: deopting frame  (no frame link)
3502   // 3: caller of deopting frame (could be compiled/interpreted).
3503 
3504   // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
3505   __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3506 
3507   // Pop deoptimized frame (int)
3508   __ movl(rcx, Address(rdi,
3509                        Deoptimization::UnrollBlock::
3510                        size_of_deoptimized_frame_offset_in_bytes()));
3511   __ addptr(rsp, rcx);
3512 
3513   // rsp should be pointing at the return address to the caller (3)
3514 
3515   // Pick up the initial fp we should save
3516   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3517   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3518 
3519 #ifdef ASSERT
3520   // Compilers generate code that bang the stack by as much as the
3521   // interpreter would need. So this stack banging should never
3522   // trigger a fault. Verify that it does not on non product builds.
3523   if (UseStackBanging) {
3524     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3525     __ bang_stack_size(rbx, rcx);
3526   }
3527 #endif
3528 
3529   // Load address of array of frame pcs into rcx (address*)
3530   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3531 
3532   // Trash the return pc
3533   __ addptr(rsp, wordSize);
3534 
3535   // Load address of array of frame sizes into rsi (intptr_t*)
3536   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset_in_bytes()));
3537 
3538   // Counter
3539   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int)
3540 
3541   // Now adjust the caller's stack to make up for the extra locals but
3542   // record the original sp so that we can save it in the skeletal
3543   // interpreter frame and the stack walking of interpreter_sender
3544   // will get the unextended sp value and not the "real" sp value.
3545 
3546   const Register sender_sp = r8;
3547 
3548   __ mov(sender_sp, rsp);
3549   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int)
3550   __ subptr(rsp, rbx);
3551 
3552   // Push interpreter frames in a loop
3553   Label loop;
3554   __ bind(loop);
3555   __ movptr(rbx, Address(rsi, 0)); // Load frame size
3556   __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
3557   __ pushptr(Address(rcx, 0));     // Save return address
3558   __ enter();                      // Save old & set new rbp
3559   __ subptr(rsp, rbx);             // Prolog
3560   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3561             sender_sp);            // Make it walkable
3562   // This value is corrected by layout_activation_impl
3563   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3564   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
3565   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
3566   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
3567   __ decrementl(rdx);              // Decrement counter
3568   __ jcc(Assembler::notZero, loop);
3569   __ pushptr(Address(rcx, 0));     // Save final return address
3570 
3571   // Re-push self-frame
3572   __ enter();                 // Save old & set new rbp
3573   __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3574                               // Prolog
3575 
3576   // Use rbp because the frames look interpreted now
3577   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3578   // Don't need the precise return PC here, just precise enough to point into this code blob.
3579   address the_pc = __ pc();
3580   __ set_last_Java_frame(noreg, rbp, the_pc);
3581 
3582   // Call C code.  Need thread but NOT official VM entry
3583   // crud.  We cannot block on this call, no GC can happen.  Call should
3584   // restore return values to their stack-slots with the new SP.
3585   // Thread is in rdi already.
3586   //
3587   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3588 
3589   __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3590   __ mov(c_rarg0, r15_thread);
3591   __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3592   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3593 
3594   // Set an oopmap for the call site
3595   // Use the same PC we used for the last java frame
3596   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3597 
3598   // Clear fp AND pc
3599   __ reset_last_Java_frame(true);
3600 
3601   // Pop self-frame.
3602   __ leave();                 // Epilog
3603 
3604   // Jump to interpreter
3605   __ ret(0);
3606 
3607   // Make sure all code is generated
3608   masm->flush();
3609 
3610   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
3611                                                  SimpleRuntimeFrame::framesize >> 1);
3612 }
3613 #endif // COMPILER2
3614 
3615 
3616 //------------------------------generate_handler_blob------
3617 //
3618 // Generate a special Compile2Runtime blob that saves all registers,
3619 // and setup oopmap.
3620 //
3621 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3622   assert(StubRoutines::forward_exception_entry() != NULL,
3623          "must be generated before");
3624 
3625   ResourceMark rm;
3626   OopMapSet *oop_maps = new OopMapSet();
3627   OopMap* map;
3628 
3629   // Allocate space for the code.  Setup code generation tools.
3630   CodeBuffer buffer("handler_blob", 2048, 1024);
3631   MacroAssembler* masm = new MacroAssembler(&buffer);
3632 
3633   address start   = __ pc();
3634   address call_pc = NULL;
3635   int frame_size_in_words;
3636   bool cause_return = (poll_type == POLL_AT_RETURN);
3637   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3638 
3639   if (UseRTMLocking) {
3640     // Abort RTM transaction before calling runtime
3641     // because critical section will be large and will be
3642     // aborted anyway. Also nmethod could be deoptimized.
3643     __ xabort(0);
3644   }
3645 
3646   // Make room for return address (or push it again)
3647   if (!cause_return) {
3648     __ push(rbx);
3649   }
3650 
3651   // Save registers, fpu state, and flags
3652   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3653 
3654   // The following is basically a call_VM.  However, we need the precise
3655   // address of the call in order to generate an oopmap. Hence, we do all the
3656   // work outselves.
3657 
3658   __ set_last_Java_frame(noreg, noreg, NULL);
3659 
3660   // The return address must always be correct so that frame constructor never
3661   // sees an invalid pc.
3662 
3663   if (!cause_return) {
3664     // overwrite the dummy value we pushed on entry
3665     __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3666     __ movptr(Address(rbp, wordSize), c_rarg0);
3667   }
3668 
3669   // Do the call
3670   __ mov(c_rarg0, r15_thread);
3671   __ call(RuntimeAddress(call_ptr));
3672 
3673   // Set an oopmap for the call site.  This oopmap will map all
3674   // oop-registers and debug-info registers as callee-saved.  This
3675   // will allow deoptimization at this safepoint to find all possible
3676   // debug-info recordings, as well as let GC find all oops.
3677 
3678   oop_maps->add_gc_map( __ pc() - start, map);
3679 
3680   Label noException;
3681 
3682   __ reset_last_Java_frame(false);
3683 
3684   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3685   __ jcc(Assembler::equal, noException);
3686 
3687   // Exception pending
3688 
3689   RegisterSaver::restore_live_registers(masm, save_vectors);
3690 
3691   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3692 
3693   // No exception case
3694   __ bind(noException);
3695 
3696   // Normal exit, restore registers and exit.
3697   RegisterSaver::restore_live_registers(masm, save_vectors);
3698 
3699   __ ret(0);
3700 
3701   // Make sure all code is generated
3702   masm->flush();
3703 
3704   // Fill-out other meta info
3705   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3706 }
3707 
3708 //
3709 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3710 //
3711 // Generate a stub that calls into vm to find out the proper destination
3712 // of a java call. All the argument registers are live at this point
3713 // but since this is generic code we don't know what they are and the caller
3714 // must do any gc of the args.
3715 //
3716 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3717   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3718 
3719   // allocate space for the code
3720   ResourceMark rm;
3721 
3722   CodeBuffer buffer(name, 1000, 512);
3723   MacroAssembler* masm                = new MacroAssembler(&buffer);
3724 
3725   int frame_size_in_words;
3726 
3727   OopMapSet *oop_maps = new OopMapSet();
3728   OopMap* map = NULL;
3729 
3730   int start = __ offset();
3731 
3732   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3733 
3734   int frame_complete = __ offset();
3735 
3736   __ set_last_Java_frame(noreg, noreg, NULL);
3737 
3738   __ mov(c_rarg0, r15_thread);
3739 
3740   __ call(RuntimeAddress(destination));
3741 
3742 
3743   // Set an oopmap for the call site.
3744   // We need this not only for callee-saved registers, but also for volatile
3745   // registers that the compiler might be keeping live across a safepoint.
3746 
3747   oop_maps->add_gc_map( __ offset() - start, map);
3748 
3749   // rax contains the address we are going to jump to assuming no exception got installed
3750 
3751   // clear last_Java_sp
3752   __ reset_last_Java_frame(false);
3753   // check for pending exceptions
3754   Label pending;
3755   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3756   __ jcc(Assembler::notEqual, pending);
3757 
3758   // get the returned Method*
3759   __ get_vm_result_2(rbx, r15_thread);
3760   __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3761 
3762   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3763 
3764   RegisterSaver::restore_live_registers(masm);
3765 
3766   // We are back the the original state on entry and ready to go.
3767 
3768   __ jmp(rax);
3769 
3770   // Pending exception after the safepoint
3771 
3772   __ bind(pending);
3773 
3774   RegisterSaver::restore_live_registers(masm);
3775 
3776   // exception pending => remove activation and forward to exception handler
3777 
3778   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3779 
3780   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3781   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3782 
3783   // -------------
3784   // make sure all code is generated
3785   masm->flush();
3786 
3787   // return the  blob
3788   // frame_size_words or bytes??
3789   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3790 }
3791 
3792 
3793 //------------------------------Montgomery multiplication------------------------
3794 //
3795 
3796 #ifndef _WINDOWS
3797 
3798 #define ASM_SUBTRACT
3799 
3800 #ifdef ASM_SUBTRACT
3801 // Subtract 0:b from carry:a.  Return carry.
3802 static unsigned long
3803 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3804   long i = 0, cnt = len;
3805   unsigned long tmp;
3806   asm volatile("clc; "
3807                "0: ; "
3808                "mov (%[b], %[i], 8), %[tmp]; "
3809                "sbb %[tmp], (%[a], %[i], 8); "
3810                "inc %[i]; dec %[cnt]; "
3811                "jne 0b; "
3812                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3813                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3814                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3815                : "memory");
3816   return tmp;
3817 }
3818 #else // ASM_SUBTRACT
3819 typedef int __attribute__((mode(TI))) int128;
3820 
3821 // Subtract 0:b from carry:a.  Return carry.
3822 static unsigned long
3823 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
3824   int128 tmp = 0;
3825   int i;
3826   for (i = 0; i < len; i++) {
3827     tmp += a[i];
3828     tmp -= b[i];
3829     a[i] = tmp;
3830     tmp >>= 64;
3831     assert(-1 <= tmp && tmp <= 0, "invariant");
3832   }
3833   return tmp + carry;
3834 }
3835 #endif // ! ASM_SUBTRACT
3836 
3837 // Multiply (unsigned) Long A by Long B, accumulating the double-
3838 // length result into the accumulator formed of T0, T1, and T2.
3839 #define MACC(A, B, T0, T1, T2)                                  \
3840 do {                                                            \
3841   unsigned long hi, lo;                                         \
3842   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
3843            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
3844            : "r"(A), "a"(B) : "cc");                            \
3845  } while(0)
3846 
3847 // As above, but add twice the double-length result into the
3848 // accumulator.
3849 #define MACC2(A, B, T0, T1, T2)                                 \
3850 do {                                                            \
3851   unsigned long hi, lo;                                         \
3852   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3853            "add %%rax, %2; adc %%rdx, %3; adc $0, %4"           \
3854            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
3855            : "r"(A), "a"(B) : "cc");                            \
3856  } while(0)
3857 
3858 // Fast Montgomery multiplication.  The derivation of the algorithm is
3859 // in  A Cryptographic Library for the Motorola DSP56000,
3860 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
3861 
3862 static void __attribute__((noinline))
3863 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3864                     unsigned long m[], unsigned long inv, int len) {
3865   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3866   int i;
3867 
3868   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3869 
3870   for (i = 0; i < len; i++) {
3871     int j;
3872     for (j = 0; j < i; j++) {
3873       MACC(a[j], b[i-j], t0, t1, t2);
3874       MACC(m[j], n[i-j], t0, t1, t2);
3875     }
3876     MACC(a[i], b[0], t0, t1, t2);
3877     m[i] = t0 * inv;
3878     MACC(m[i], n[0], t0, t1, t2);
3879 
3880     assert(t0 == 0, "broken Montgomery multiply");
3881 
3882     t0 = t1; t1 = t2; t2 = 0;
3883   }
3884 
3885   for (i = len; i < 2*len; i++) {
3886     int j;
3887     for (j = i-len+1; j < len; j++) {
3888       MACC(a[j], b[i-j], t0, t1, t2);
3889       MACC(m[j], n[i-j], t0, t1, t2);
3890     }
3891     m[i-len] = t0;
3892     t0 = t1; t1 = t2; t2 = 0;
3893   }
3894 
3895   while (t0)
3896     t0 = sub(m, n, t0, len);
3897 }
3898 
3899 // Fast Montgomery squaring.  This uses asymptotically 25% fewer
3900 // multiplies so it should be up to 25% faster than Montgomery
3901 // multiplication.  However, its loop control is more complex and it
3902 // may actually run slower on some machines.
3903 
3904 static void __attribute__((noinline))
3905 montgomery_square(unsigned long a[], unsigned long n[],
3906                   unsigned long m[], unsigned long inv, int len) {
3907   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3908   int i;
3909 
3910   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3911 
3912   for (i = 0; i < len; i++) {
3913     int j;
3914     int end = (i+1)/2;
3915     for (j = 0; j < end; j++) {
3916       MACC2(a[j], a[i-j], t0, t1, t2);
3917       MACC(m[j], n[i-j], t0, t1, t2);
3918     }
3919     if ((i & 1) == 0) {
3920       MACC(a[j], a[j], t0, t1, t2);
3921     }
3922     for (; j < i; j++) {
3923       MACC(m[j], n[i-j], t0, t1, t2);
3924     }
3925     m[i] = t0 * inv;
3926     MACC(m[i], n[0], t0, t1, t2);
3927 
3928     assert(t0 == 0, "broken Montgomery square");
3929 
3930     t0 = t1; t1 = t2; t2 = 0;
3931   }
3932 
3933   for (i = len; i < 2*len; i++) {
3934     int start = i-len+1;
3935     int end = start + (len - start)/2;
3936     int j;
3937     for (j = start; j < end; j++) {
3938       MACC2(a[j], a[i-j], t0, t1, t2);
3939       MACC(m[j], n[i-j], t0, t1, t2);
3940     }
3941     if ((i & 1) == 0) {
3942       MACC(a[j], a[j], t0, t1, t2);
3943     }
3944     for (; j < len; j++) {
3945       MACC(m[j], n[i-j], t0, t1, t2);
3946     }
3947     m[i-len] = t0;
3948     t0 = t1; t1 = t2; t2 = 0;
3949   }
3950 
3951   while (t0)
3952     t0 = sub(m, n, t0, len);
3953 }
3954 
3955 // Swap words in a longword.
3956 static unsigned long swap(unsigned long x) {
3957   return (x << 32) | (x >> 32);
3958 }
3959 
3960 // Copy len longwords from s to d, word-swapping as we go.  The
3961 // destination array is reversed.
3962 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3963   d += len;
3964   while(len-- > 0) {
3965     d--;
3966     *d = swap(*s);
3967     s++;
3968   }
3969 }
3970 
3971 // The threshold at which squaring is advantageous was determined
3972 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3973 #define MONTGOMERY_SQUARING_THRESHOLD 64
3974 
3975 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3976                                         jint len, jlong inv,
3977                                         jint *m_ints) {
3978   assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3979   int longwords = len/2;
3980 
3981   // Make very sure we don't use so much space that the stack might
3982   // overflow.  512 jints corresponds to an 16384-bit integer and
3983   // will use here a total of 8k bytes of stack space.
3984   int total_allocation = longwords * sizeof (unsigned long) * 4;
3985   guarantee(total_allocation <= 8192, "must be");
3986   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3987 
3988   // Local scratch arrays
3989   unsigned long
3990     *a = scratch + 0 * longwords,
3991     *b = scratch + 1 * longwords,
3992     *n = scratch + 2 * longwords,
3993     *m = scratch + 3 * longwords;
3994 
3995   reverse_words((unsigned long *)a_ints, a, longwords);
3996   reverse_words((unsigned long *)b_ints, b, longwords);
3997   reverse_words((unsigned long *)n_ints, n, longwords);
3998 
3999   ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
4000 
4001   reverse_words(m, (unsigned long *)m_ints, longwords);
4002 }
4003 
4004 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
4005                                       jint len, jlong inv,
4006                                       jint *m_ints) {
4007   assert(len % 2 == 0, "array length in montgomery_square must be even");
4008   int longwords = len/2;
4009 
4010   // Make very sure we don't use so much space that the stack might
4011   // overflow.  512 jints corresponds to an 16384-bit integer and
4012   // will use here a total of 6k bytes of stack space.
4013   int total_allocation = longwords * sizeof (unsigned long) * 3;
4014   guarantee(total_allocation <= 8192, "must be");
4015   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4016 
4017   // Local scratch arrays
4018   unsigned long
4019     *a = scratch + 0 * longwords,
4020     *n = scratch + 1 * longwords,
4021     *m = scratch + 2 * longwords;
4022 
4023   reverse_words((unsigned long *)a_ints, a, longwords);
4024   reverse_words((unsigned long *)n_ints, n, longwords);
4025 
4026   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
4027     ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
4028   } else {
4029     ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
4030   }
4031 
4032   reverse_words(m, (unsigned long *)m_ints, longwords);
4033 }
4034 
4035 #endif // WINDOWS
4036 
4037 #ifdef COMPILER2
4038 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
4039 //
4040 //------------------------------generate_exception_blob---------------------------
4041 // creates exception blob at the end
4042 // Using exception blob, this code is jumped from a compiled method.
4043 // (see emit_exception_handler in x86_64.ad file)
4044 //
4045 // Given an exception pc at a call we call into the runtime for the
4046 // handler in this method. This handler might merely restore state
4047 // (i.e. callee save registers) unwind the frame and jump to the
4048 // exception handler for the nmethod if there is no Java level handler
4049 // for the nmethod.
4050 //
4051 // This code is entered with a jmp.
4052 //
4053 // Arguments:
4054 //   rax: exception oop
4055 //   rdx: exception pc
4056 //
4057 // Results:
4058 //   rax: exception oop
4059 //   rdx: exception pc in caller or ???
4060 //   destination: exception handler of caller
4061 //
4062 // Note: the exception pc MUST be at a call (precise debug information)
4063 //       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
4064 //
4065 
4066 void OptoRuntime::generate_exception_blob() {
4067   assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
4068   assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
4069   assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
4070 
4071   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
4072 
4073   // Allocate space for the code
4074   ResourceMark rm;
4075   // Setup code generation tools
4076   CodeBuffer buffer("exception_blob", 2048, 1024);
4077   MacroAssembler* masm = new MacroAssembler(&buffer);
4078 
4079 
4080   address start = __ pc();
4081 
4082   // Exception pc is 'return address' for stack walker
4083   __ push(rdx);
4084   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4085 
4086   // Save callee-saved registers.  See x86_64.ad.
4087 
4088   // rbp is an implicitly saved callee saved register (i.e., the calling
4089   // convention will save/restore it in the prolog/epilog). Other than that
4090   // there are no callee save registers now that adapter frames are gone.
4091 
4092   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4093 
4094   // Store exception in Thread object. We cannot pass any arguments to the
4095   // handle_exception call, since we do not want to make any assumption
4096   // about the size of the frame where the exception happened in.
4097   // c_rarg0 is either rdi (Linux) or rcx (Windows).
4098   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4099   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4100 
4101   // This call does all the hard work.  It checks if an exception handler
4102   // exists in the method.
4103   // If so, it returns the handler address.
4104   // If not, it prepares for stack-unwinding, restoring the callee-save
4105   // registers of the frame being removed.
4106   //
4107   // address OptoRuntime::handle_exception_C(JavaThread* thread)
4108 
4109   // At a method handle call, the stack may not be properly aligned
4110   // when returning with an exception.
4111   address the_pc = __ pc();
4112   __ set_last_Java_frame(noreg, noreg, the_pc);
4113   __ mov(c_rarg0, r15_thread);
4114   __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
4115   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4116 
4117   // Set an oopmap for the call site.  This oopmap will only be used if we
4118   // are unwinding the stack.  Hence, all locations will be dead.
4119   // Callee-saved registers will be the same as the frame above (i.e.,
4120   // handle_exception_stub), since they were restored when we got the
4121   // exception.
4122 
4123   OopMapSet* oop_maps = new OopMapSet();
4124 
4125   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4126 
4127   __ reset_last_Java_frame(false);
4128 
4129   // Restore callee-saved registers
4130 
4131   // rbp is an implicitly saved callee-saved register (i.e., the calling
4132   // convention will save restore it in prolog/epilog) Other than that
4133   // there are no callee save registers now that adapter frames are gone.
4134 
4135   __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4136 
4137   __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4138   __ pop(rdx);                  // No need for exception pc anymore
4139 
4140   // rax: exception handler
4141 
4142   // We have a handler in rax (could be deopt blob).
4143   __ mov(r8, rax);
4144 
4145   // Get the exception oop
4146   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4147   // Get the exception pc in case we are deoptimized
4148   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4149 #ifdef ASSERT
4150   __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4151   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4152 #endif
4153   // Clear the exception oop so GC no longer processes it as a root.
4154   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4155 
4156   // rax: exception oop
4157   // r8:  exception handler
4158   // rdx: exception pc
4159   // Jump to handler
4160 
4161   __ jmp(r8);
4162 
4163   // Make sure all code is generated
4164   masm->flush();
4165 
4166   // Set exception blob
4167   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4168 }
4169 #endif // COMPILER2
4170 
4171 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
4172   BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
4173   CodeBuffer buffer(buf);
4174   short buffer_locs[20];
4175   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4176                                          sizeof(buffer_locs)/sizeof(relocInfo));
4177 
4178   MacroAssembler _masm(&buffer);
4179   MacroAssembler* masm = &_masm;
4180 
4181   const Array<SigEntry>* sig_vk = vk->extended_sig();
4182   const Array<VMRegPair>* regs = vk->return_regs();
4183 
4184   int pack_fields_off = __ offset();
4185 
4186   int j = 1;
4187   for (int i = 0; i < sig_vk->length(); i++) {
4188     BasicType bt = sig_vk->at(i)._bt;
4189     if (bt == T_VALUETYPE) {
4190       continue;
4191     }
4192     if (bt == T_VOID) {
4193       if (sig_vk->at(i-1)._bt == T_LONG ||
4194           sig_vk->at(i-1)._bt == T_DOUBLE) {
4195         j++;
4196       }
4197       continue;
4198     }
4199     int off = sig_vk->at(i)._offset;
4200     VMRegPair pair = regs->at(j);
4201     VMReg r_1 = pair.first();
4202     VMReg r_2 = pair.second();
4203     Address to(rax, off);
4204     if (bt == T_FLOAT) {
4205       __ movflt(to, r_1->as_XMMRegister());
4206     } else if (bt == T_DOUBLE) {
4207       __ movdbl(to, r_1->as_XMMRegister());
4208     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4209       __ store_heap_oop(to, r_1->as_Register());
4210     } else {
4211       assert(is_java_primitive(bt), "unexpected basic type");
4212       size_t size_in_bytes = type2aelembytes(bt);
4213       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
4214     }
4215     j++;
4216   }
4217   assert(j == regs->length(), "missed a field?");
4218 
4219   __ ret(0);
4220 
4221   int unpack_fields_off = __ offset();
4222 
4223   j = 1;
4224   for (int i = 0; i < sig_vk->length(); i++) {
4225     BasicType bt = sig_vk->at(i)._bt;
4226     if (bt == T_VALUETYPE) {
4227       continue;
4228     }
4229     if (bt == T_VOID) {
4230       if (sig_vk->at(i-1)._bt == T_LONG ||
4231           sig_vk->at(i-1)._bt == T_DOUBLE) {
4232         j++;
4233       }
4234       continue;
4235     }
4236     int off = sig_vk->at(i)._offset;
4237     VMRegPair pair = regs->at(j);
4238     VMReg r_1 = pair.first();
4239     VMReg r_2 = pair.second();
4240     Address from(rax, off);
4241     if (bt == T_FLOAT) {
4242       __ movflt(r_1->as_XMMRegister(), from);
4243     } else if (bt == T_DOUBLE) {
4244       __ movdbl(r_1->as_XMMRegister(), from);
4245     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4246       __ load_heap_oop(r_1->as_Register(), from);
4247     } else {
4248       assert(is_java_primitive(bt), "unexpected basic type");
4249       size_t size_in_bytes = type2aelembytes(bt);
4250       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4251     }
4252     j++;
4253   }
4254   assert(j == regs->length(), "missed a field?");
4255 
4256   if (StressValueTypeReturnedAsFields) {
4257     __ load_klass(rax, rax);
4258     __ orptr(rax, 1);
4259   }
4260 
4261   __ ret(0);
4262 
4263   __ flush();
4264 
4265   return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
4266 }