/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#if COMPILER2_OR_JVMCI
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // we don't expect any arg reg save area so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rbp_off = 0,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

// FIXME -- this is used by C1
class RegisterSaver {
 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int r0_offset_in_bytes(void)    { return (32 + r0->encoding()) * wordSize; }
  static int reg_offset_in_bytes(Register r)    { return r0_offset_in_bytes() + r->encoding() * wordSize; }
  static int rmethod_offset_in_bytes(void)    { return reg_offset_in_bytes(rmethod); }
  static int rscratch1_offset_in_bytes(void)    { return (32 + rscratch1->encoding()) * wordSize; }
  static int v0_offset_in_bytes(void)   { return 0; }
  static int return_offset_in_bytes(void) { return (32 /* floats*/ + 31 /* gregs*/) * wordSize; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);

  // Capture info about frame layout
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off+FPUStateSizeInWords,
                rfp_off = r0_off + 30 * 2,
                return_off = rfp_off + 2,      // slot for return address
                reg_save_size = return_off + 2};

};
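
// A rough sketch of the save area described by the enum above, as laid
// out by enter() + push_CPU_state() in save_live_registers() below
// (offsets in 32-bit slots, lowest address first):
//
//   [fpu_state_off .. fpu_state_end]  FPU/SIMD state (FPUStateSizeInWords)
//   [r0_off ..]                       30 general registers, 2 slots each
//   [rfp_off, rfp_off+1]              saved rfp (frame record)
//   [return_off, return_off+1]        saved lr (return address)
//
// reg_save_size is therefore the total size of the area in slots.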

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    // Save upper half of vector registers
    int vect_words = 32 * 8 / wordSize;
    additional_frame_words += vect_words;
  }
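  // Arithmetic check: each of the 32 SIMD registers needs 8 extra bytes
  // for its upper half (the default area saves only the low 64 bits),
  // so with wordSize == 8 that is 32 * 8 / 8 == 32 extra words.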
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(save_vectors);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
    Register r = as_Register(i);
    if (r < rheapbase && r != rscratch1 && r != rscratch2) {
      int sp_offset = 2 * (i + 32); // SP offsets are in 4-byte words,
                                    // register slots are 8 bytes
                                    // wide, 32 floating-point
                                    // registers
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = save_vectors ? (4 * i) : (2 * i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#ifndef COMPILER2
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(restore_vectors);
  __ leave();
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ ldrd(v0, Address(sp, v0_offset_in_bytes()));
  // Restore integer result register
  __ ldr(r0, Address(sp, r0_offset_in_bytes()));

  // Pop all of the register save area off the stack
  __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 8-byte vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ mov(rscratch1, destination);
  __ br(rscratch1);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
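
// The "+ 4" above skips the frame record (saved rfp and lr) that lies
// between the callee's frame pointer and the incoming stack arguments:
// two 8-byte words, i.e. four 4-byte VMReg stack slots.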

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.
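
// For example (illustrative only): an instance method taking (int, long,
// double) is described by sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID,
// T_DOUBLE, T_VOID }.  java_calling_convention below assigns the receiver
// to j_rarg0, the int to j_rarg1, the long to j_rarg2, the double to
// j_farg0, marks the T_VOID halves as bad, and returns 0 stack slots.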

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaabul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, 0xdeadffffdeadaaacul);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }
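
  // The two instructions above move sp down by comp_words_on_stack words
  // and then round it down to a 16-byte boundary, preserving the stack
  // alignment that AArch64 requires of sp.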

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW. However, locals
        // are accessed as negative, so the LSW is at the LOW address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.
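
// For example (illustrative only): a signature of (jint, jlong, jfloat)
// is assigned c_rarg0, c_rarg1 and c_farg0 below, with the jlong's
// trailing T_VOID half marked bad.  Note that an argument that spills
// always consumes two 4-byte slots (one 8-byte stack word), matching the
// AAPCS64 convention of 8-byte stack argument slots.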

    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
    };
    static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };

    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }

  return stk_args;
}

// On 64 bit we will store integer-like items to the stack as 64-bit items
// (SPARC ABI) even though java would only store 32 bits for a parameter.
// On 32 bit it will simply be 32 bits.  So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmp(rscratch1, zr);
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmp(rOop, zr);
    __ lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldrw(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ strw(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}
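
// save_args collects the general purpose argument registers into a RegSet
// and pushes them pairwise, while each floating point argument register is
// pushed individually with a pre-indexed store; restore_args below must
// therefore pop the float registers in the reverse order.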

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}


// Check GCLocker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) { Unimplemented(); }

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }


class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair        _src;
    VMRegPair        _dst;
    int              _src_index;
    int              _dst_index;
    bool             _processed;
    MoveOperation*  _next;
    MoveOperation*  _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _dst(dst)
    , _src_index(src_index)
    , _dst_index(dst_index)
    , _processed(false)
    , _next(NULL)
    , _prev(NULL) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _dst; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                    BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collected all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};
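
// rt_call performs a call to a runtime destination.  If the target is
// already in the code cache we can reach it with a pc-relative far_call;
// otherwise (a VM entry point outside the code cache) we make an indirect
// call through rscratch1, since the target may be out of branch range.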
static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, int type) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    assert((unsigned)gpargs < 256, "eek!");
    assert((unsigned)fpargs < 32, "eek!");
    __ lea(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
    __ maybe_isb();
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
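
  // For example (illustrative): a static native method with two declared
  // parameters has total_in_args == 2 and total_c_args == 4; the two extra
  // slots are the hidden JNIEnv* and the class mirror.  An instance method
  // gains only the JNIEnv* slot, since its receiver is already counted in
  // total_in_args.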
1309 
1310   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1311   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1312   BasicType* in_elem_bt = NULL;
1313 
1314   int argc = 0;
1315   if (!is_critical_native) {
1316     out_sig_bt[argc++] = T_ADDRESS;
1317     if (method->is_static()) {
1318       out_sig_bt[argc++] = T_OBJECT;
1319     }
1320 
1321     for (int i = 0; i < total_in_args ; i++ ) {
1322       out_sig_bt[argc++] = in_sig_bt[i];
1323     }
1324   } else {
1325     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1326     SignatureStream ss(method->signature());
1327     for (int i = 0; i < total_in_args ; i++ ) {
1328       if (in_sig_bt[i] == T_ARRAY) {
1329         // Arrays are passed as int, elem* pair
1330         out_sig_bt[argc++] = T_INT;
1331         out_sig_bt[argc++] = T_ADDRESS;
1332         Symbol* atype = ss.as_symbol();
1333         const char* at = atype->as_C_string();
1334         if (strlen(at) == 2) {
1335           assert(at[0] == '[', "must be");
1336           switch (at[1]) {
1337             case 'B': in_elem_bt[i]  = T_BYTE; break;
1338             case 'C': in_elem_bt[i]  = T_CHAR; break;
1339             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1340             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1341             case 'I': in_elem_bt[i]  = T_INT; break;
1342             case 'J': in_elem_bt[i]  = T_LONG; break;
1343             case 'S': in_elem_bt[i]  = T_SHORT; break;
1344             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1345             default: ShouldNotReachHere();
1346           }
1347         }
1348       } else {
1349         out_sig_bt[argc++] = in_sig_bt[i];
1350         in_elem_bt[i] = T_VOID;
1351       }
1352       if (in_sig_bt[i] != T_VOID) {
1353         assert(in_sig_bt[i] == ss.type(), "must match");
1354         ss.next();
1355       }
1356     }
1357   }
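       // For example (a sketch): a critical-native parameter of type byte[] ("[B")
       // arrives as T_ARRAY above and is expanded into the C pair
       // { T_INT /* length */, T_ADDRESS /* element pointer */ }, with
       // in_elem_bt[i] == T_BYTE recording the element type for the unpacking
       // code further down.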
1358 
1359   // Now figure out where the args must be stored and how much stack space
1360   // they require.
1361   int out_arg_slots;
1362   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1363 
1364   // Compute framesize for the wrapper.  We need to handlize all oops in
1365   // incoming registers
1366 
1367   // Calculate the total number of stack slots we will need.
1368 
1369   // First count the abi requirement plus all of the outgoing args
1370   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1371 
1372   // Now the space for the inbound oop handle area
1373   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1374   if (is_critical_native) {
1375     // Critical natives may have to call out so they need a save area
1376     // for register arguments.
1377     int double_slots = 0;
1378     int single_slots = 0;
1379     for ( int i = 0; i < total_in_args; i++) {
1380       if (in_regs[i].first()->is_Register()) {
1381         const Register reg = in_regs[i].first()->as_Register();
1382         switch (in_sig_bt[i]) {
1383           case T_BOOLEAN:
1384           case T_BYTE:
1385           case T_SHORT:
1386           case T_CHAR:
1387           case T_INT:  single_slots++; break;
1388           case T_ARRAY:  // specific to LP64 (7145024)
1389           case T_LONG: double_slots++; break;
1390           default:  ShouldNotReachHere();
1391         }
1392       } else if (in_regs[i].first()->is_FloatRegister()) {
1393         ShouldNotReachHere();
1394       }
1395     }
1396     total_save_slots = double_slots * 2 + single_slots;
1397     // align the save area
1398     if (double_slots != 0) {
1399       stack_slots = align_up(stack_slots, 2);
1400     }
1401   }
1402 
1403   int oop_handle_offset = stack_slots;
1404   stack_slots += total_save_slots;
1405 
1406   // Now any space we need for handlizing a klass if this is a static method
1407 
1408   int klass_slot_offset = 0;
1409   int klass_offset = -1;
1410   int lock_slot_offset = 0;
1411   bool is_static = false;
1412 
1413   if (method->is_static()) {
1414     klass_slot_offset = stack_slots;
1415     stack_slots += VMRegImpl::slots_per_word;
1416     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1417     is_static = true;
1418   }
1419 
1420   // Plus a lock if needed
1421 
1422   if (method->is_synchronized()) {
1423     lock_slot_offset = stack_slots;
1424     stack_slots += VMRegImpl::slots_per_word;
1425   }
1426 
1427   // Now a place (+2) to save return values or temp during shuffling
1428   // + 4 for return address (which we own) and saved rfp
1429   stack_slots += 6;
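       // (Slot arithmetic for the "+= 6" above: a stack slot is half a word, so
       // the spare move area is 2 slots and the lr/rfp pair, two words, is 4.)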
1430 
1431   // Ok The space we have allocated will look like:
1432   //
1433   //
1434   // FP-> |                     |
1435   //      |---------------------|
1436   //      | 2 slots for moves   |
1437   //      |---------------------|
1438   //      | lock box (if sync)  |
1439   //      |---------------------| <- lock_slot_offset
1440   //      | klass (if static)   |
1441   //      |---------------------| <- klass_slot_offset
1442   //      | oopHandle area      |
1443   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1444   //      | outbound memory     |
1445   //      | based arguments     |
1446   //      |                     |
1447   //      |---------------------|
1448   //      |                     |
1449   // SP-> | out_preserved_slots |
1450   //
1451   //
1452 
1453 
1454   // Now compute actual number of stack words we need rounding to make
1455   // stack properly aligned.
1456   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1457 
1458   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1459 
1460   // First thing make an ic check to see if we should even be here
1461 
1462   // We are free to use all registers as temps without saving them and
1463   // restoring them except rfp. rfp is the only callee save register
1464   // as far as the interpreter and the compiler(s) are concerned.
1465 
1466 
1467   const Register ic_reg = rscratch2;
1468   const Register receiver = j_rarg0;
1469 
1470   Label hit;
1471   Label exception_pending;
1472 
1473   assert_different_registers(ic_reg, receiver, rscratch1);
1474   __ verify_oop(receiver);
1475   __ cmp_klass(receiver, ic_reg, rscratch1);
1476   __ br(Assembler::EQ, hit);
1477 
1478   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1479 
1480   // Verified entry point must be aligned
1481   __ align(8);
1482 
1483   __ bind(hit);
1484 
1485   int vep_offset = ((intptr_t)__ pc()) - start;
1486 
1487   // If we have to make this method not-entrant we'll overwrite its
1488   // first instruction with a jump.  For this action to be legal we
1489   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1490   // SVC, HVC, or SMC.  Make it a NOP.
1491   __ nop();
1492 
1493   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1494     Label L_skip_barrier;
1495     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1496     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1497     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1498 
1499     __ bind(L_skip_barrier);
1500   }
1501 
1502   // Generate stack overflow check
1503   if (UseStackBanging) {
1504     __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
1505   } else {
1506     Unimplemented();
1507   }
1508 
1509   // Generate a new frame for the wrapper.
1510   __ enter();
1511   // -2 because return address is already present and so is saved rfp
1512   __ sub(sp, sp, stack_size - 2*wordSize);
1513 
1514   // Frame is now completed as far as size and linkage.
1515   int frame_complete = ((intptr_t)__ pc()) - start;
1516 
1517   // We use r20 as the oop handle for the receiver/klass
1518   // It is callee save so it survives the call to native
1519 
1520   const Register oop_handle_reg = r20;
1521 
1522   if (is_critical_native) {
1523     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
1524                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1525   }
1526 
1527   //
1528   // We immediately shuffle the arguments so that, for any vm call we have to
1529   // make from here on out (sync slow path, jvmti, etc.), we will have
1530   // captured the oops from our caller and have a valid oopMap for
1531   // them.
1532 
1533   // -----------------
1534   // The Grand Shuffle
1535 
1536   // The Java calling convention is either equal (linux) or denser (win64) than the
1537   // C calling convention. However, because of the jni_env argument the C calling
1538   // convention always has at least one more (and two for static) arguments than Java.
1539   // Therefore if we move the args from java -> c backwards then we will never have
1540   // a register->register conflict and we don't have to build a dependency graph
1541   // and figure out how to break any cycles.
1542   //
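       // A sketch of why the backwards walk is safe: every Java argument lands in
       // a C argument position with a higher index, so, moving from the last
       // argument down, each source register is read before any later
       // (lower-indexed) move can overwrite it.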
1543 
1544   // Record sp-based slot for receiver on stack for non-static methods
1545   int receiver_offset = -1;
1546 
1547   // This is a trick. We double the stack slots so we can claim
1548   // the oops in the caller's frame. Since we are sure to have
1549   // more args than the caller, doubling is enough to make
1550   // sure we can capture all the incoming oop args from the
1551   // caller.
1552   //
1553   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1554 
1555   // Mark location of rfp (someday)
1556   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1557 
1558 
1559   int float_args = 0;
1560   int int_args = 0;
1561 
1562 #ifdef ASSERT
1563   bool reg_destroyed[RegisterImpl::number_of_registers];
1564   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1565   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1566     reg_destroyed[r] = false;
1567   }
1568   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1569     freg_destroyed[f] = false;
1570   }
1571 
1572 #endif /* ASSERT */
1573 
1574   // This may iterate in two different directions depending on the
1575   // kind of native it is.  The reason is that for regular JNI natives
1576   // the incoming and outgoing registers are offset upwards and for
1577   // critical natives they are offset down.
1578   GrowableArray<int> arg_order(2 * total_in_args);
1579   VMRegPair tmp_vmreg;
1580   tmp_vmreg.set2(r19->as_VMReg());
1581 
1582   if (!is_critical_native) {
1583     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1584       arg_order.push(i);
1585       arg_order.push(c_arg);
1586     }
1587   } else {
1588     // Compute a valid move order, using tmp_vmreg to break any cycles
1589     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1590   }
1591 
1592   int temploc = -1;
1593   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1594     int i = arg_order.at(ai);
1595     int c_arg = arg_order.at(ai + 1);
1596     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1597     if (c_arg == -1) {
1598       assert(is_critical_native, "should only be required for critical natives");
1599       // This arg needs to be moved to a temporary
1600       __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1601       in_regs[i] = tmp_vmreg;
1602       temploc = i;
1603       continue;
1604     } else if (i == -1) {
1605       assert(is_critical_native, "should only be required for critical natives");
1606       // Read from the temporary location
1607       assert(temploc != -1, "must be valid");
1608       i = temploc;
1609       temploc = -1;
1610     }
1611 #ifdef ASSERT
1612     if (in_regs[i].first()->is_Register()) {
1613       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1614     } else if (in_regs[i].first()->is_FloatRegister()) {
1615       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1616     }
1617     if (out_regs[c_arg].first()->is_Register()) {
1618       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1619     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1620       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1621     }
1622 #endif /* ASSERT */
1623     switch (in_sig_bt[i]) {
1624       case T_ARRAY:
1625         if (is_critical_native) {
1626           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1627           c_arg++;
1628 #ifdef ASSERT
1629           if (out_regs[c_arg].first()->is_Register()) {
1630             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1631           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1632             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1633           }
1634 #endif
1635           int_args++;
1636           break;
1637         }
1638       case T_OBJECT:
1639         assert(!is_critical_native, "no oop arguments");
1640         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1641                     ((i == 0) && (!is_static)),
1642                     &receiver_offset);
1643         int_args++;
1644         break;
1645       case T_VOID:
1646         break;
1647 
1648       case T_FLOAT:
1649         float_move(masm, in_regs[i], out_regs[c_arg]);
1650         float_args++;
1651         break;
1652 
1653       case T_DOUBLE:
1654         assert( i + 1 < total_in_args &&
1655                 in_sig_bt[i + 1] == T_VOID &&
1656                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1657         double_move(masm, in_regs[i], out_regs[c_arg]);
1658         float_args++;
1659         break;
1660 
1661       case T_LONG :
1662         long_move(masm, in_regs[i], out_regs[c_arg]);
1663         int_args++;
1664         break;
1665 
1666       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1667 
1668       default:
1669         move32_64(masm, in_regs[i], out_regs[c_arg]);
1670         int_args++;
1671     }
1672   }
1673 
1674   // point c_arg at the first arg that is already loaded in case we
1675   // need to spill before we call out
1676   int c_arg = total_c_args - total_in_args;
1677 
1678   // Pre-load a static method's oop into c_rarg1.
1679   if (method->is_static() && !is_critical_native) {
1680 
1681     //  load oop into a register
1682     __ movoop(c_rarg1,
1683               JNIHandles::make_local(method->method_holder()->java_mirror()),
1684               /*immediate*/true);
1685 
1686     // Now handlize the static class mirror; it's known not-null.
1687     __ str(c_rarg1, Address(sp, klass_offset));
1688     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1689 
1690     // Now get the handle
1691     __ lea(c_rarg1, Address(sp, klass_offset));
1692     // and protect the arg if we must spill
1693     c_arg--;
1694   }
1695 
1696   // Change state to native (we save the return address in the thread, since it might not
1697   // be pushed on the stack when we do a stack traversal).
1698   // We use the same pc/oopMap repeatedly when we call out
1699 
1700   Label native_return;
1701   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1702 
1703   Label dtrace_method_entry, dtrace_method_entry_done;
1704   {
1705     unsigned long offset;
1706     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1707     __ ldrb(rscratch1, Address(rscratch1, offset));
1708     __ cbnzw(rscratch1, dtrace_method_entry);
1709     __ bind(dtrace_method_entry_done);
1710   }
1711 
1712   // RedefineClasses() tracing support for obsolete method entry
1713   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1714     // protect the args we've loaded
1715     save_args(masm, total_c_args, c_arg, out_regs);
1716     __ mov_metadata(c_rarg1, method());
1717     __ call_VM_leaf(
1718       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1719       rthread, c_rarg1);
1720     restore_args(masm, total_c_args, c_arg, out_regs);
1721   }
1722 
1723   // Lock a synchronized method
1724 
1725   // Register definitions used by locking and unlocking
1726 
1727   const Register swap_reg = r0;
1728   const Register obj_reg  = r19;  // Will contain the oop
1729   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1730   const Register old_hdr  = r13;  // value of old header at unlock time
1731   const Register tmp = lr;
1732 
1733   Label slow_path_lock;
1734   Label lock_done;
1735 
1736   if (method->is_synchronized()) {
1737     assert(!is_critical_native, "unhandled");
1738 
1739     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1740 
1741     // Get the handle (the 2nd argument)
1742     __ mov(oop_handle_reg, c_rarg1);
1743 
1744     // Get address of the box
1745 
1746     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1747 
1748     // Load the oop from the handle
1749     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1750 
1751     __ resolve(IS_NOT_NULL, obj_reg);
1752 
1753     if (UseBiasedLocking) {
1754       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1755     }
1756 
1757     // Load (object->mark() | 1) into swap_reg %r0
1758     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1759     __ orr(swap_reg, rscratch1, 1);
1760 
1761     // Save (object->mark() | 1) into BasicLock's displaced header
1762     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1763 
1764     // src -> dest iff dest == r0 else r0 <- dest
1765     { Label here;
1766       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1767     }
1768 
1769     // Hmm should this move to the slow path code area???
1770 
1771     // Test if the oopMark is an obvious stack pointer, i.e.,
1772     //  1) (mark & 3) == 0, and
1773     //  2) sp <= mark < mark + os::pagesize()
1774     // These 3 tests can be done by evaluating the following
1775     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1776     // assuming both stack pointer and pagesize have their
1777     // least significant 2 bits clear.
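         // Worked example, assuming a 4096-byte page: 3 - 4096 == -4093 ==
         // 0x...fffff003, so the AND result is zero iff the low two bits of
         // (mark - sp) are clear and 0 <= mark - sp < os::vm_page_size().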
1778     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1779 
1780     __ sub(swap_reg, sp, swap_reg);
1781     __ neg(swap_reg, swap_reg);
1782     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1783 
1784     // Save the test result, for recursive case, the result is zero
1785     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1786     __ br(Assembler::NE, slow_path_lock);
1787 
1788     // Slow path will re-enter here
1789 
1790     __ bind(lock_done);
1791   }
1792 
1793 
1794   // Finally just about ready to make the JNI call
1795 
1796   // get JNIEnv* which is first argument to native
1797   if (!is_critical_native) {
1798     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1799   }
1800 
1801   // Now set thread in native
1802   __ mov(rscratch1, _thread_in_native);
1803   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1804   __ stlrw(rscratch1, rscratch2);
1805 
1806   {
1807     int return_type = 0;
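         // Result-kind encoding handed to rt_call below (as read off this switch):
         // 0 == void, 1 == integral or pointer, 2 == float, 3 == double.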
1808     switch (ret_type) {
1809     case T_VOID:
1810       return_type = 0; break;
1811     case T_CHAR:
1812     case T_BYTE:
1813     case T_SHORT:
1814     case T_INT:
1815     case T_BOOLEAN:
1816     case T_LONG:
1817       return_type = 1; break;
1818     case T_ARRAY:
1819     case T_OBJECT:
1820       return_type = 1; break;
1821     case T_FLOAT:
1822       return_type = 2; break;
1823     case T_DOUBLE:
1824       return_type = 3; break;
1825     default:
1826       ShouldNotReachHere();
1827     }
1828     rt_call(masm, native_func,
1829             int_args + 2, // AArch64 passes up to 8 args in int registers
1830             float_args,   // and up to 8 float args
1831             return_type);
1832   }
1833 
1834   __ bind(native_return);
1835 
1836   intptr_t return_pc = (intptr_t) __ pc();
1837   oop_maps->add_gc_map(return_pc - start, map);
1838 
1839   // Unpack native results.
1840   switch (ret_type) {
1841   case T_BOOLEAN: __ c2bool(r0);                     break;
1842   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1843   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1844   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1845   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1846   case T_DOUBLE :
1847   case T_FLOAT  :
1848     // Result is in v0; we'll save it as needed
1849     break;
1850   case T_ARRAY:                 // Really a handle
1851   case T_OBJECT:                // Really a handle
1852       break; // can't de-handlize until after safepoint check
1853   case T_VOID: break;
1854   case T_LONG: break;
1855   default       : ShouldNotReachHere();
1856   }
1857 
1858   // Switch thread to "native transition" state before reading the synchronization state.
1859   // This additional state is necessary because reading and testing the synchronization
1860   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1861   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1862   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1863   //     Thread A is resumed to finish this native method, but doesn't block here since it
1864   //     didn't see any synchronization in progress, and escapes.
1865   __ mov(rscratch1, _thread_in_native_trans);
1866 
1867   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1868 
1869   // Force this write out before the read below
1870   __ dmb(Assembler::ISH);
1871 
1872   // check for safepoint operation in progress and/or pending suspend requests
1873   Label safepoint_in_progress, safepoint_in_progress_done;
1874   {
1875     __ safepoint_poll_acquire(safepoint_in_progress);
1876     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1877     __ cbnzw(rscratch1, safepoint_in_progress);
1878     __ bind(safepoint_in_progress_done);
1879   }
1880 
1881   // change thread state
1882   Label after_transition;
1883   __ mov(rscratch1, _thread_in_Java);
1884   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1885   __ stlrw(rscratch1, rscratch2);
1886   __ bind(after_transition);
1887 
1888   Label reguard;
1889   Label reguard_done;
1890   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1891   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1892   __ br(Assembler::EQ, reguard);
1893   __ bind(reguard_done);
1894 
1895   // native result if any is live
1896 
1897   // Unlock
1898   Label unlock_done;
1899   Label slow_path_unlock;
1900   if (method->is_synchronized()) {
1901 
1902     // Get locked oop from the handle we passed to jni
1903     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1904 
1905     __ resolve(IS_NOT_NULL, obj_reg);
1906 
1907     Label done;
1908 
1909     if (UseBiasedLocking) {
1910       __ biased_locking_exit(obj_reg, old_hdr, done);
1911     }
1912 
1913     // Simple recursive lock?
1914 
1915     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1916     __ cbz(rscratch1, done);
1917 
1918     // Must save r0 if it is live now because cmpxchg must use it
1919     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1920       save_native_result(masm, ret_type, stack_slots);
1921     }
1922 
1923 
1924     // get address of the stack lock
1925     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1926     //  get old displaced header
1927     __ ldr(old_hdr, Address(r0, 0));
1928 
1929     // Atomic swap old header if oop still contains the stack lock
1930     Label succeed;
1931     __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1932     __ bind(succeed);
1933 
1934     // slow path re-enters here
1935     __ bind(unlock_done);
1936     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1937       restore_native_result(masm, ret_type, stack_slots);
1938     }
1939 
1940     __ bind(done);
1941   }
1942 
1943   Label dtrace_method_exit, dtrace_method_exit_done;
1944   {
1945     unsigned long offset;
1946     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1947     __ ldrb(rscratch1, Address(rscratch1, offset));
1948     __ cbnzw(rscratch1, dtrace_method_exit);
1949     __ bind(dtrace_method_exit_done);
1950   }
1951 
1952   __ reset_last_Java_frame(false);
1953 
1954   // Unbox oop result, e.g. JNIHandles::resolve result.
1955   if (is_reference_type(ret_type)) {
1956     __ resolve_jobject(r0, rthread, rscratch2);
1957   }
1958 
1959   if (CheckJNICalls) {
1960     // clear_pending_jni_exception_check
1961     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1962   }
1963 
1964   if (!is_critical_native) {
1965     // reset handle block
1966     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
1967     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
1968   }
1969 
1970   __ leave();
1971 
1972   if (!is_critical_native) {
1973     // Any exception pending?
1974     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1975     __ cbnz(rscratch1, exception_pending);
1976   }
1977 
1978   // We're done
1979   __ ret(lr);
1980 
1981   // Unexpected paths are out of line and go here
1982 
1983   if (!is_critical_native) {
1984     // forward the exception
1985     __ bind(exception_pending);
1986 
1987     // and forward the exception
1988     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1989   }
1990 
1991   // Slow path locking & unlocking
1992   if (method->is_synchronized()) {
1993 
1994     __ block_comment("Slow path lock {");
1995     __ bind(slow_path_lock);
1996 
1997     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM.
1998     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1999 
2000     // protect the args we've loaded
2001     save_args(masm, total_c_args, c_arg, out_regs);
2002 
2003     __ mov(c_rarg0, obj_reg);
2004     __ mov(c_rarg1, lock_reg);
2005     __ mov(c_rarg2, rthread);
2006 
2007     // Not a leaf but we have last_Java_frame setup as we want
2008     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2009     restore_args(masm, total_c_args, c_arg, out_regs);
2010 
2011 #ifdef ASSERT
2012     { Label L;
2013       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2014       __ cbz(rscratch1, L);
2015       __ stop("no pending exception allowed on exit from monitorenter");
2016       __ bind(L);
2017     }
2018 #endif
2019     __ b(lock_done);
2020 
2021     __ block_comment("} Slow path lock");
2022 
2023     __ block_comment("Slow path unlock {");
2024     __ bind(slow_path_unlock);
2025 
2026     // If we haven't already saved the native result we must save it now as
2027     // the FP result register (v0) is still exposed.
2028 
2029     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2030       save_native_result(masm, ret_type, stack_slots);
2031     }
2032 
2033     __ mov(c_rarg2, rthread);
2034     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2035     __ mov(c_rarg0, obj_reg);
2036 
2037     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2038     // NOTE that obj_reg == r19 currently
2039     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2040     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2041 
2042     rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 3, 0, 1);
2043 
2044 #ifdef ASSERT
2045     {
2046       Label L;
2047       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2048       __ cbz(rscratch1, L);
2049       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2050       __ bind(L);
2051     }
2052 #endif /* ASSERT */
2053 
2054     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2055 
2056     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2057       restore_native_result(masm, ret_type, stack_slots);
2058     }
2059     __ b(unlock_done);
2060 
2061     __ block_comment("} Slow path unlock");
2062 
2063   } // synchronized
2064 
2065   // SLOW PATH Reguard the stack if needed
2066 
2067   __ bind(reguard);
2068   save_native_result(masm, ret_type, stack_slots);
2069   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), 0, 0, 0);
2070   restore_native_result(masm, ret_type, stack_slots);
2071   // and continue
2072   __ b(reguard_done);
2073 
2074   // SLOW PATH safepoint
2075   {
2076     __ block_comment("safepoint {");
2077     __ bind(safepoint_in_progress);
2078 
2079     // Don't use call_VM, as it will see a possible pending exception and forward it,
2080     // never returning here and preventing us from clearing _last_native_pc down below.
2081     //
2082     save_native_result(masm, ret_type, stack_slots);
2083     __ mov(c_rarg0, rthread);
2084 #ifndef PRODUCT
2085     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2086 #endif
2087     if (!is_critical_native) {
2088       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2089     } else {
2090       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2091     }
2092     __ blr(rscratch1);
2093     __ maybe_isb();
2094     // Restore any method result value
2095     restore_native_result(masm, ret_type, stack_slots);
2096 
2097     if (is_critical_native) {
2098       // The call above performed the transition to thread_in_Java so
2099       // skip the transition logic above.
2100       __ b(after_transition);
2101     }
2102 
2103     __ b(safepoint_in_progress_done);
2104     __ block_comment("} safepoint");
2105   }
2106 
2107   // SLOW PATH dtrace support
2108   {
2109     __ block_comment("dtrace entry {");
2110     __ bind(dtrace_method_entry);
2111 
2112     // We have all of the arguments set up at this point. We must not clobber
2113     // any argument registers here except to save/restore them around the call.
2114 
2115     save_args(masm, total_c_args, c_arg, out_regs);
2116     __ mov_metadata(c_rarg1, method());
2117     __ call_VM_leaf(
2118       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2119       rthread, c_rarg1);
2120     restore_args(masm, total_c_args, c_arg, out_regs);
2121     __ b(dtrace_method_entry_done);
2122     __ block_comment("} dtrace entry");
2123   }
2124 
2125   {
2126     __ block_comment("dtrace exit {");
2127     __ bind(dtrace_method_exit);
2128     save_native_result(masm, ret_type, stack_slots);
2129     __ mov_metadata(c_rarg1, method());
2130     __ call_VM_leaf(
2131          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2132          rthread, c_rarg1);
2133     restore_native_result(masm, ret_type, stack_slots);
2134     __ b(dtrace_method_exit_done);
2135     __ block_comment("} dtrace exit");
2136   }
2137 
2138 
2139   __ flush();
2140 
2141   nmethod *nm = nmethod::new_native_nmethod(method,
2142                                             compile_id,
2143                                             masm->code(),
2144                                             vep_offset,
2145                                             frame_complete,
2146                                             stack_slots / VMRegImpl::slots_per_word,
2147                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2148                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2149                                             oop_maps);
2150 
2151   if (is_critical_native) {
2152     nm->set_lazy_critical_native(true);
2153   }
2154 
2155   return nm;
2156 
2157 }
2158 
2159 // This function returns the adjustment size (in number of words) to a c2i adapter
2160 // activation for use during deoptimization.
2161 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2162   assert(callee_locals >= callee_parameters,
2163           "test and remove; got more parms than locals");
2164   if (callee_locals < callee_parameters)
2165     return 0;                   // No adjustment for negative locals
2166   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2167   // diff is counted in stack words
2168   return align_up(diff, 2);
2169 }
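     // A worked example, assuming Interpreter::stackElementWords == 1 (64-bit):
     // callee_parameters == 2 and callee_locals == 5 give diff == 3 stack words,
     // which aligns up to 4 so the extended frame keeps 16-byte stack alignment.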
2170 
2171 
2172 //------------------------------generate_deopt_blob----------------------------
2173 void SharedRuntime::generate_deopt_blob() {
2174   // Allocate space for the code
2175   ResourceMark rm;
2176   // Setup code generation tools
2177   int pad = 0;
2178 #if INCLUDE_JVMCI
2179   if (EnableJVMCI || UseAOT) {
2180     pad += 512; // Increase the buffer size when compiling for JVMCI
2181   }
2182 #endif
2183   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2184   MacroAssembler* masm = new MacroAssembler(&buffer);
2185   int frame_size_in_words;
2186   OopMap* map = NULL;
2187   OopMapSet *oop_maps = new OopMapSet();
2188 
2189   // -------------
2190   // This code enters when returning to a de-optimized nmethod.  A return
2191 // address has been pushed on the stack, and return values are in
2192   // registers.
2193   // If we are doing a normal deopt then we were called from the patched
2194   // nmethod from the point we returned to the nmethod. So the return
2195   // address on the stack is wrong by NativeCall::instruction_size
2196   // We will adjust the value so it looks like we have the original return
2197   // address on the stack (like when we eagerly deoptimized).
2198   // In the case of an exception pending when deoptimizing, we enter
2199   // with a return address on the stack that points after the call we patched
2200   // into the exception handler. We have the following register state from,
2201 // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2202   //    r0: exception oop
2203   //    r19: exception handler
2204   //    r3: throwing pc
2205   // So in this case we simply jam r3 into the useless return address and
2206   // the stack looks just like we want.
2207   //
2208   // At this point we need to de-opt.  We save the argument return
2209   // registers.  We call the first C routine, fetch_unroll_info().  This
2210   // routine captures the return values and returns a structure which
2211   // describes the current frame size and the sizes of all replacement frames.
2212   // The current frame is compiled code and may contain many inlined
2213   // functions, each with their own JVM state.  We pop the current frame, then
2214   // push all the new frames.  Then we call the C routine unpack_frames() to
2215   // populate these frames.  Finally unpack_frames() returns us the new target
2216   // address.  Notice that callee-save registers are BLOWN here; they have
2217   // already been captured in the vframeArray at the time the return PC was
2218   // patched.
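       // In outline (a sketch of the flow below, not additional generated code):
       //   1. save_live_registers          -- build an oopmap over this frame
       //   2. fetch_unroll_info            -- returns the UnrollBlock describing
       //      the replacement frames
       //   3. pop the deoptee, push one skeletal interpreter frame per inlined
       //      JVM state
       //   4. unpack_frames                -- fills in the skeletal frames and
       //      yields the address to continue at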
2219   address start = __ pc();
2220   Label cont;
2221 
2222   // Prolog for the non-exception case!
2223 
2224   // Save everything in sight.
2225   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2226 
2227   // Normal deoptimization.  Save exec mode for unpack_frames.
2228   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2229   __ b(cont);
2230 
2231   int reexecute_offset = __ pc() - start;
2232 #if INCLUDE_JVMCI && !defined(COMPILER1)
2233   if (EnableJVMCI && UseJVMCICompiler) {
2234     // JVMCI does not use this kind of deoptimization
2235     __ should_not_reach_here();
2236   }
2237 #endif
2238 
2239   // Reexecute case
2240   // The return address is the pc that describes which bci to re-execute at.
2241 
2242   // No need to update map as each call to save_live_registers will produce identical oopmap
2243   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2244 
2245   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2246   __ b(cont);
2247 
2248 #if INCLUDE_JVMCI
2249   Label after_fetch_unroll_info_call;
2250   int implicit_exception_uncommon_trap_offset = 0;
2251   int uncommon_trap_offset = 0;
2252 
2253   if (EnableJVMCI || UseAOT) {
2254     implicit_exception_uncommon_trap_offset = __ pc() - start;
2255 
2256     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2257     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2258 
2259     uncommon_trap_offset = __ pc() - start;
2260 
2261     // Save everything in sight.
2262     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2263     // fetch_unroll_info needs to call last_java_frame()
2264     Label retaddr;
2265     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2266 
2267     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2268     __ movw(rscratch1, -1);
2269     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2270 
2271     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2272     __ mov(c_rarg0, rthread);
2273     __ movw(c_rarg2, rcpool); // exec mode
2274     __ lea(rscratch1,
2275            RuntimeAddress(CAST_FROM_FN_PTR(address,
2276                                            Deoptimization::uncommon_trap)));
2277     __ blr(rscratch1);
2278     __ bind(retaddr);
2279     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2280 
2281     __ reset_last_Java_frame(false);
2282 
2283     __ b(after_fetch_unroll_info_call);
2284   } // EnableJVMCI
2285 #endif // INCLUDE_JVMCI
2286 
2287   int exception_offset = __ pc() - start;
2288 
2289   // Prolog for exception case
2290 
2291   // all registers are dead at this entry point, except for r0, and
2292   // r3 which contain the exception oop and exception pc
2293   // respectively.  Set them in TLS and fall thru to the
2294   // unpack_with_exception_in_tls entry point.
2295 
2296   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2297   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2298 
2299   int exception_in_tls_offset = __ pc() - start;
2300 
2301   // new implementation because exception oop is now passed in JavaThread
2302 
2303   // Prolog for exception case
2304   // All registers must be preserved because they might be used by LinearScan
2305   // Exception oop and throwing PC are passed in JavaThread
2306   // tos: stack at point of call to method that threw the exception (i.e. only
2307   // args are on the stack, no return address)
2308 
2309   // The return address pushed by save_live_registers will be patched
2310   // later with the throwing pc. The correct value is not available
2311   // now because loading it from memory would destroy registers.
2312 
2313   // NB: The SP at this point must be the SP of the method that is
2314   // being deoptimized.  Deoptimization assumes that the frame created
2315   // here by save_live_registers is immediately below the method's SP.
2316   // This is a somewhat fragile mechanism.
2317 
2318   // Save everything in sight.
2319   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2320 
2321   // Now it is safe to overwrite any register
2322 
2323   // Deopt during an exception.  Save exec mode for unpack_frames.
2324   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2325 
2326   // load throwing pc from JavaThread and patch it as the return address
2327   // of the current frame. Then clear the field in JavaThread
2328 
2329   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2330   __ str(r3, Address(rfp, wordSize));
2331   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2332 
2333 #ifdef ASSERT
2334   // verify that there is really an exception oop in JavaThread
2335   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2336   __ verify_oop(r0);
2337 
2338   // verify that there is no pending exception
2339   Label no_pending_exception;
2340   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2341   __ cbz(rscratch1, no_pending_exception);
2342   __ stop("must not have pending exception here");
2343   __ bind(no_pending_exception);
2344 #endif
2345 
2346   __ bind(cont);
2347 
2348   // Call C code.  Need thread and this frame, but NOT official VM entry
2349   // crud.  We cannot block on this call, no GC can happen.
2350   //
2351   // UnrollBlock* fetch_unroll_info(JavaThread* thread, int exec_mode)
2352 
2353   // fetch_unroll_info needs to call last_java_frame().
2354 
2355   Label retaddr;
2356   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2357 #ifdef ASSERT0
2358   { Label L;
2359     __ ldr(rscratch1, Address(rthread,
2360                               JavaThread::last_Java_fp_offset()));
2361     __ cbz(rscratch1, L);
2362     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2363     __ bind(L);
2364   }
2365 #endif // ASSERT
2366   __ mov(c_rarg0, rthread);
2367   __ mov(c_rarg1, rcpool);
2368   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2369   __ blr(rscratch1);
2370   __ bind(retaddr);
2371 
2372   // Need to have an oopmap that tells fetch_unroll_info where to
2373   // find any register it might need.
2374   oop_maps->add_gc_map(__ pc() - start, map);
2375 
2376   __ reset_last_Java_frame(false);
2377 
2378 #if INCLUDE_JVMCI
2379   if (EnableJVMCI || UseAOT) {
2380     __ bind(after_fetch_unroll_info_call);
2381   }
2382 #endif
2383 
2384   // Load UnrollBlock* into r5
2385   __ mov(r5, r0);
2386 
2387   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2388   Label noException;
2389   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2390   __ br(Assembler::NE, noException);
2391   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2392   // QQQ this is useless; it was NULL above
2393   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2394   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2395   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2396 
2397   __ verify_oop(r0);
2398 
2399   // Overwrite the result registers with the exception results.
2400   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2401   // I think this is useless
2402   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2403 
2404   __ bind(noException);
2405 
2406   // Only register save data is on the stack.
2407   // Now restore the result registers.  Everything else is either dead
2408   // or captured in the vframeArray.
2409   RegisterSaver::restore_result_registers(masm);
2410 
2411   // All of the register save area has been popped off the stack. Only the
2412   // return address remains.
2413 
2414   // Pop all the frames we must move/replace.
2415   //
2416   // Frame picture (youngest to oldest)
2417   // 1: self-frame (no frame link)
2418   // 2: deopting frame  (no frame link)
2419   // 3: caller of deopting frame (could be compiled/interpreted).
2420   //
2421   // Note: by leaving the return address of self-frame on the stack
2422   // and using the size of frame 2 to adjust the stack
2423   // when we are done the return to frame 3 will still be on the stack.
2424 
2425   // Pop deoptimized frame
2426   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2427   __ sub(r2, r2, 2 * wordSize);
2428   __ add(sp, sp, r2);
2429   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2430   // LR should now be the return address to the caller (3)
2431 
2432 #ifdef ASSERT
2433   // Compilers generate code that bang the stack by as much as the
2434   // interpreter would need. So this stack banging should never
2435   // trigger a fault. Verify that it does not on non product builds.
2436   if (UseStackBanging) {
2437     __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2438     __ bang_stack_size(r19, r2);
2439   }
2440 #endif
2441   // Load address of array of frame pcs into r2
2442   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2443 
2444   // Trash the old pc
2445   // __ addptr(sp, wordSize);  FIXME ????
2446 
2447   // Load address of array of frame sizes into r4
2448   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2449 
2450   // Load counter into r3
2451   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2452 
2453   // Now adjust the caller's stack to make up for the extra locals
2454   // but record the original sp so that we can save it in the skeletal interpreter
2455   // frame and the stack walking of interpreter_sender will get the unextended sp
2456   // value and not the "real" sp value.
2457 
2458   const Register sender_sp = r6;
2459 
2460   __ mov(sender_sp, sp);
2461   __ ldrw(r19, Address(r5,
2462                        Deoptimization::UnrollBlock::
2463                        caller_adjustment_offset_in_bytes()));
2464   __ sub(sp, sp, r19);
2465 
2466   // Push interpreter frames in a loop
2467   __ mov(rscratch1, (address)0xDEADDEAD);        // Make a recognizable pattern
2468   __ mov(rscratch2, rscratch1);
2469   Label loop;
2470   __ bind(loop);
2471   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2472   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2473   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2474   __ enter();                           // Save old & set new fp
2475   __ sub(sp, sp, r19);                  // Prolog
2476   // This value is corrected by layout_activation_impl
2477   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2478   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2479   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2480   __ sub(r3, r3, 1);                   // Decrement counter
2481   __ cbnz(r3, loop);
2482 
2483   // Re-push self-frame
2484   __ ldr(lr, Address(r2));
2485   __ enter();
2486 
2487   // Allocate a full sized register save area.  We subtract 2 because
2488   // enter() just pushed 2 words
2489   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2490 
2491   // Restore frame locals after moving the frame
2492   __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2493   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2494 
2495   // Call C code.  Need thread but NOT official VM entry
2496   // crud.  We cannot block on this call, no GC can happen.  Call should
2497   // restore return values to their stack-slots with the new SP.
2498   //
2499   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2500 
2501   // Use rfp because the frames look interpreted now
2502   // Don't need the precise return PC here, just precise enough to point into this code blob.
2503   address the_pc = __ pc();
2504   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2505 
2506   __ mov(c_rarg0, rthread);
2507   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2508   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2509   __ blr(rscratch1);
2510 
2511   // Set an oopmap for the call site
2512   // Use the same PC we used for the last java frame
2513   oop_maps->add_gc_map(the_pc - start,
2514                        new OopMap( frame_size_in_words, 0 ));
2515 
2516   // Clear fp AND pc
2517   __ reset_last_Java_frame(true);
2518 
2519   // Collect return values
2520   __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2521   __ ldr(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2522   // I think this is useless (throwing pc?)
2523   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2524 
2525   // Pop self-frame.
2526   __ leave();                           // Epilog
2527 
2528   // Jump to interpreter
2529   __ ret(lr);
2530 
2531   // Make sure all code is generated
2532   masm->flush();
2533 
2534   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2535   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2536 #if INCLUDE_JVMCI
2537   if (EnableJVMCI || UseAOT) {
2538     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2539     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2540   }
2541 #endif
2542 }
2543 
2544 uint SharedRuntime::out_preserve_stack_slots() {
2545   return 0;
2546 }
2547 
2548 #if COMPILER2_OR_JVMCI
2549 //------------------------------generate_uncommon_trap_blob--------------------
2550 void SharedRuntime::generate_uncommon_trap_blob() {
2551   // Allocate space for the code
2552   ResourceMark rm;
2553   // Setup code generation tools
2554   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2555   MacroAssembler* masm = new MacroAssembler(&buffer);
2556 
2557   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2558 
2559   address start = __ pc();
2560 
2561   // Push self-frame.  We get here with a return address in LR
2562   // and sp should be 16 byte aligned
2563   // push rfp and retaddr by hand
2564   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2565   // we don't expect an arg reg save area
2566 #ifndef PRODUCT
2567   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2568 #endif
2569   // The compiler left unloaded_class_index in j_rarg0; move it to where
2570   // the runtime expects it.
2571   if (c_rarg1 != j_rarg0) {
2572     __ movw(c_rarg1, j_rarg0);
2573   }
2574 
2575   // We need to set the last Java SP to the stack pointer of the stub frame
2576   // and the pc to the address where this runtime call will return
2577   // (although actually any pc in this code blob will do).
2578   Label retaddr;
2579   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2580 
2581   // Call C code.  Need thread but NOT official VM entry
2582   // crud.  We cannot block on this call, no GC can happen.  Call should
2583   // capture callee-saved registers as well as return values.
2584   // Thread is passed in c_rarg0 below.
2585   //
2586   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2587   //
2588   // n.b. 2 gp args, 0 fp args, integral return type
2589 
2590   __ mov(c_rarg0, rthread);
2591   __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2592   __ lea(rscratch1,
2593          RuntimeAddress(CAST_FROM_FN_PTR(address,
2594                                          Deoptimization::uncommon_trap)));
2595   __ blr(rscratch1);
2596   __ bind(retaddr);
2597 
2598   // Set an oopmap for the call site
2599   OopMapSet* oop_maps = new OopMapSet();
2600   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2601 
2602   // location of rfp is known implicitly by the frame sender code
2603 
2604   oop_maps->add_gc_map(__ pc() - start, map);
2605 
2606   __ reset_last_Java_frame(false);
2607 
2608   // move UnrollBlock* into r4
2609   __ mov(r4, r0);
2610 
2611 #ifdef ASSERT
2612   { Label L;
2613     __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2614     __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2615     __ br(Assembler::EQ, L);
2616     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2617     __ bind(L);
2618   }
2619 #endif
2620 
2621   // Pop all the frames we must move/replace.
2622   //
2623   // Frame picture (youngest to oldest)
2624   // 1: self-frame (no frame link)
2625   // 2: deopting frame  (no frame link)
2626   // 3: caller of deopting frame (could be compiled/interpreted).
2627 
2628   // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2629   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2630 
2631   // Pop deoptimized frame (int)
2632   __ ldrw(r2, Address(r4,
2633                       Deoptimization::UnrollBlock::
2634                       size_of_deoptimized_frame_offset_in_bytes()));
2635   __ sub(r2, r2, 2 * wordSize);
2636   __ add(sp, sp, r2);
2637   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2638   // LR should now be the return address to the caller (3) frame
2639 
2640 #ifdef ASSERT
2641   // Compilers generate code that bang the stack by as much as the
2642   // interpreter would need. So this stack banging should never
2643   // trigger a fault. Verify that it does not on non product builds.
2644   if (UseStackBanging) {
2645     __ ldrw(r1, Address(r4,
2646                         Deoptimization::UnrollBlock::
2647                         total_frame_sizes_offset_in_bytes()));
2648     __ bang_stack_size(r1, r2);
2649   }
2650 #endif
2651 
2652   // Load address of array of frame pcs into r2 (address*)
2653   __ ldr(r2, Address(r4,
2654                      Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2655 
2656   // Load address of array of frame sizes into r5 (intptr_t*)
2657   __ ldr(r5, Address(r4,
2658                      Deoptimization::UnrollBlock::
2659                      frame_sizes_offset_in_bytes()));
2660 
2661   // Counter
2662   __ ldrw(r3, Address(r4,
2663                       Deoptimization::UnrollBlock::
2664                       number_of_frames_offset_in_bytes())); // (int)
2665 
2666   // Now adjust the caller's stack to make up for the extra locals but
2667   // record the original sp so that we can save it in the skeletal
2668   // interpreter frame and the stack walking of interpreter_sender
2669   // will get the unextended sp value and not the "real" sp value.
2670 
2671   const Register sender_sp = r8;
2672 
2673   __ mov(sender_sp, sp);
2674   __ ldrw(r1, Address(r4,
2675                       Deoptimization::UnrollBlock::
2676                       caller_adjustment_offset_in_bytes())); // (int)
2677   __ sub(sp, sp, r1);
2678 
2679   // Push interpreter frames in a loop
2680   Label loop;
2681   __ bind(loop);
2682   __ ldr(r1, Address(r5, 0));       // Load frame size
2683   __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
2684   __ ldr(lr, Address(r2, 0));       // Save return address
2685   __ enter();                       // and old rfp & set new rfp
2686   __ sub(sp, sp, r1);               // Prolog
2687   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2688   // This value is corrected by layout_activation_impl
2689   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2690   __ mov(sender_sp, sp);          // Pass sender_sp to next frame
2691   __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
2692   __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
2693   __ subsw(r3, r3, 1);            // Decrement counter
2694   __ br(Assembler::GT, loop);
2695   __ ldr(lr, Address(r2, 0));     // save final return address
2696   // Re-push self-frame
2697   __ enter();                     // & old rfp & set new rfp
2698 
2699   // Use rfp because the frames look interpreted now
2700   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2701   // Don't need the precise return PC here, just precise enough to point into this code blob.
2702   address the_pc = __ pc();
2703   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2704 
2705   // Call C code.  Need thread but NOT official VM entry
2706   // crud.  We cannot block on this call, no GC can happen.  Call should
2707   // restore return values to their stack-slots with the new SP.
2708   // Thread is passed in c_rarg0 below.
2709   //
2710   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
2711   //
2712   // n.b. 2 gp args, 0 fp args, integral return type
2713 
2714   // sp should already be aligned
2715   __ mov(c_rarg0, rthread);
2716   __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2717   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2718   __ blr(rscratch1);
2719 
2720   // Set an oopmap for the call site
2721   // Use the same PC we used for the last java frame
2722   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2723 
2724   // Clear fp AND pc
2725   __ reset_last_Java_frame(true);
2726 
2727   // Pop self-frame.
2728   __ leave();                 // Epilog
2729 
2730   // Jump to interpreter
2731   __ ret(lr);
2732 
2733   // Make sure all code is generated
2734   masm->flush();
2735 
2736   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
2737                                                  SimpleRuntimeFrame::framesize >> 1);
2738 }
2739 #endif // COMPILER2_OR_JVMCI
2740 
2741 
2742 //------------------------------generate_handler_blob------
2743 //
2744 // Generate a special Compile2Runtime blob that saves all registers
2745 // and sets up an oopmap.
2746 //
2747 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2748   ResourceMark rm;
2749   OopMapSet *oop_maps = new OopMapSet();
2750   OopMap* map;
2751 
2752   // Allocate space for the code.  Setup code generation tools.
2753   CodeBuffer buffer("handler_blob", 2048, 1024);
2754   MacroAssembler* masm = new MacroAssembler(&buffer);
2755 
2756   address start   = __ pc();
2757   address call_pc = NULL;
2758   int frame_size_in_words;
2759   bool cause_return = (poll_type == POLL_AT_RETURN);
2760   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
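  // poll_type tells us which kind of safepoint poll trapped: POLL_AT_RETURN
  // polls at a method return, where a valid return address is already on the
  // stack, while POLL_AT_VECTOR_LOOP is a loop poll in code that keeps vector
  // registers live, so vectors must be saved and restored here as well.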
2761 
2762   // Save Integer and Float registers.
2763   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
2764 
2765   // The following is basically a call_VM.  However, we need the precise
2766   // address of the call in order to generate an oopmap. Hence, we do all the
2767   // work ourselves.
2768 
2769   Label retaddr;
2770   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2771 
2772   // The return address must always be correct so that the frame constructor
2773   // never sees an invalid pc.
2774 
2775   if (!cause_return) {
2776     // overwrite the return address pushed by save_live_registers
2777     // Additionally, r20 is a callee-saved register so we can look at
2778     // it later to determine if someone changed the return address for
2779     // us!
2780     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2781     __ str(r20, Address(rfp, wordSize));
2782   }
2783 
2784   // Do the call
2785   __ mov(c_rarg0, rthread);
2786   __ lea(rscratch1, RuntimeAddress(call_ptr));
2787   __ blr(rscratch1);
2788   __ bind(retaddr);
2789 
2790   // Set an oopmap for the call site.  This oopmap will map all
2791   // oop-registers and debug-info registers as callee-saved.  This
2792   // will allow deoptimization at this safepoint to find all possible
2793   // debug-info recordings, as well as let GC find all oops.
2794 
2795   oop_maps->add_gc_map( __ pc() - start, map);
2796 
2797   Label noException;
2798 
2799   __ reset_last_Java_frame(false);
2800 
2801   __ maybe_isb();
2802   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2803 
2804   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2805   __ cbz(rscratch1, noException);
2806 
2807   // Exception pending
2808 
2809   RegisterSaver::restore_live_registers(masm, save_vectors);
2810 
2811   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2812 
2813   // No exception case
2814   __ bind(noException);
2815 
2816   Label no_adjust, bail;
2817   if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
2818     // If our stashed return pc was modified by the runtime we avoid touching it
2819     __ ldr(rscratch1, Address(rfp, wordSize));
2820     __ cmp(r20, rscratch1);
2821     __ br(Assembler::NE, no_adjust);
2822 
2823 #ifdef ASSERT
2824     // Verify the correct encoding of the poll we're about to skip.
2825     // See NativeInstruction::is_ldrw_to_zr()
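    // A 32-bit "ldr wzr, [Rn]" (unsigned-immediate form) encodes with
    // bits[31:22] == 0b1011100101 and Rt == bits[4:0] == 0b11111 (wzr);
    // the two ubfx/cmpw pairs below check exactly those two fields.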
2826     __ ldrw(rscratch1, Address(r20));
2827     __ ubfx(rscratch2, rscratch1, 22, 10);
2828     __ cmpw(rscratch2, 0b1011100101);
2829     __ br(Assembler::NE, bail);
2830     __ ubfx(rscratch2, rscratch1, 0, 5);
2831     __ cmpw(rscratch2, 0b11111);
2832     __ br(Assembler::NE, bail);
2833 #endif
2834     // Adjust return pc forward to step over the safepoint poll instruction
2835     __ add(r20, r20, NativeInstruction::instruction_size);
2836     __ str(r20, Address(rfp, wordSize));
2837   }
2838 
2839   __ bind(no_adjust);
2840   // Normal exit, restore registers and exit.
2841   RegisterSaver::restore_live_registers(masm, save_vectors);
2842 
2843   __ ret(lr);
2844 
2845 #ifdef ASSERT
2846   __ bind(bail);
2847   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2848 #endif
2849 
2850   // Make sure all code is generated
2851   masm->flush();
2852 
2853   // Fill in the other meta info
2854   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2855 }
2856 
2857 //
2858 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2859 //
2860 // Generate a stub that calls into the VM to find out the proper destination
2861 // of a Java call. All the argument registers are live at this point,
2862 // but since this is generic code we don't know what they hold; the caller
2863 // must do any GC of the args.
2864 //
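// For reference, the shared runtime creates these blobs along the following
// lines (a sketch of the call sites in SharedRuntime::generate_stubs; the
// exact code lives in shared runtime code, not in this file):
//
//   _resolve_static_call_blob =
//     generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
//                           "resolve_static_call");
//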
2865 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2866   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2867 
2868   // allocate space for the code
2869   ResourceMark rm;
2870 
2871   CodeBuffer buffer(name, 1000, 512);
2872   MacroAssembler* masm                = new MacroAssembler(&buffer);
2873 
2874   int frame_size_in_words;
2875 
2876   OopMapSet *oop_maps = new OopMapSet();
2877   OopMap* map = NULL;
2878 
2879   int start = __ offset();
2880 
2881   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2882 
2883   int frame_complete = __ offset();
2884 
2885   {
2886     Label retaddr;
2887     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2888 
2889     __ mov(c_rarg0, rthread);
2890     __ lea(rscratch1, RuntimeAddress(destination));
2891 
2892     __ blr(rscratch1);
2893     __ bind(retaddr);
2894   }
2895 
2896   // Set an oopmap for the call site.
2897   // We need this not only for callee-saved registers, but also for volatile
2898   // registers that the compiler might be keeping live across a safepoint.
2899 
2900   oop_maps->add_gc_map( __ offset() - start, map);
2901 
2902   __ maybe_isb();
2903 
2904   // r0 contains the address we are going to jump to, assuming no exception was installed
2905 
2906   // clear last_Java_sp
2907   __ reset_last_Java_frame(false);
2908   // check for pending exceptions
2909   Label pending;
2910   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2911   __ cbnz(rscratch1, pending);
2912 
2913   // get the returned Method*
2914   __ get_vm_result_2(rmethod, rthread);
2915   __ str(rmethod, Address(sp, RegisterSaver::reg_offset_in_bytes(rmethod)));
2916 
2917   // r0 is where we want to jump: overwrite the saved rscratch1 slot with it (rscratch1 is a scratch register, so clobbering it is safe)
2918   __ str(r0, Address(sp, RegisterSaver::rscratch1_offset_in_bytes()));
2919   RegisterSaver::restore_live_registers(masm);
2920 
2921   // We are back to the original state on entry and ready to go.
2922 
2923   __ br(rscratch1);
2924 
2925   // Pending exception after the safepoint
2926 
2927   __ bind(pending);
2928 
2929   RegisterSaver::restore_live_registers(masm);
2930 
2931   // exception pending => remove activation and forward to exception handler
2932 
2933   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2934 
2935   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2936   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2937 
2938   // -------------
2939   // make sure all code is generated
2940   masm->flush();
2941 
2942   // return the blob
2943   // the frame size is passed to new_runtime_stub in words
2944   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2945 }
2946 
2947 #if COMPILER2_OR_JVMCI
2948 // This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
2949 //
2950 //------------------------------generate_exception_blob---------------------------
2951 // Creates the exception blob at the end.
2952 // This code (the exception blob) is jumped to from a compiled method.
2953 // (see emit_exception_handler in the aarch64.ad file)
2954 //
2955 // Given an exception pc at a call we call into the runtime for the
2956 // handler in this method. This handler might merely restore state
2957 // (i.e. callee-save registers), unwind the frame, and jump to the
2958 // exception handler for the nmethod if there is no Java-level handler
2959 // for the nmethod.
2960 //
2961 // This code is entered with a branch, not a call.
2962 //
2963 // Arguments:
2964 //   r0: exception oop
2965 //   r3: exception pc
2966 //
2967 // Results:
2968 //   r0: exception oop
2969 //   r3: exception pc in caller or ???
2970 //   destination: exception handler of caller
2971 //
2972 // Note: the exception pc MUST be at a call (precise debug information)
2973 //       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
2974 //
2975 
2976 void OptoRuntime::generate_exception_blob() {
2977   assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
2978   assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
2979   assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
2980 
2981   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2982 
2983   // Allocate space for the code
2984   ResourceMark rm;
2985   // Setup code generation tools
2986   CodeBuffer buffer("exception_blob", 2048, 1024);
2987   MacroAssembler* masm = new MacroAssembler(&buffer);
2988 
2989   // TODO: check the various assumptions made here
2990   //
2991   // make sure we do that before relying on this code
2992 
2993   address start = __ pc();
2994 
2995   // push rfp and retaddr by hand
2996   // Exception pc is 'return address' for stack walker
2997   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
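  // After this pre-indexed stp the two-slot frame is:
  //   sp + 0 : saved rfp
  //   sp + 8 : lr, i.e. the exception pc the stack walker will report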
2998   // there are no callee save registers and we don't expect an
2999   // arg reg save area
3000 #ifndef PRODUCT
3001   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
3002 #endif
3003   // Store exception in Thread object. We cannot pass any arguments to the
3004   // handle_exception call, since we do not want to make any assumption
3005   // about the size of the frame in which the exception happened.
3006   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
3007   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
3008 
3009   // This call does all the hard work.  It checks if an exception handler
3010   // exists in the method.
3011   // If so, it returns the handler address.
3012   // If not, it prepares for stack-unwinding, restoring the callee-save
3013   // registers of the frame being removed.
3014   //
3015   // address OptoRuntime::handle_exception_C(JavaThread* thread)
3016   //
3017   // n.b. 1 gp arg, 0 fp args, integral return type
3018 
3019   // the stack should always be aligned
3020   address the_pc = __ pc();
3021   __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
3022   __ mov(c_rarg0, rthread);
3023   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3024   __ blr(rscratch1);
3025   __ maybe_isb();
3026 
3027   // Set an oopmap for the call site.  This oopmap will only be used if we
3028   // are unwinding the stack.  Hence, all locations will be dead.
3029   // Callee-saved registers will be the same as the frame above (i.e.,
3030   // handle_exception_stub), since they were restored when we got the
3031   // exception.
3032 
3033   OopMapSet* oop_maps = new OopMapSet();
3034 
3035   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3036 
3037   __ reset_last_Java_frame(false);
3038 
3039   // Restore callee-saved registers
3040 
3041   // rfp is an implicitly saved callee-saved register (i.e. the calling
3042   // convention will save/restore it in the prolog/epilog). Other than that
3043   // there are no callee-saved registers now that adapter frames are gone,
3044   // and we don't expect an arg reg save area.
3045   __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
3046 
3047   // r0: exception handler
3048 
3049   // We have a handler in r0 (could be deopt blob).
3050   __ mov(r8, r0);
3051 
3052   // Get the exception oop
3053   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
3054   // Get the exception pc in case we are deoptimized
3055   __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
3056 #ifdef ASSERT
3057   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3058   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3059 #endif
3060   // Clear the exception oop so GC no longer processes it as a root.
3061   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3062 
3063   // r0: exception oop
3064   // r8: exception handler
3065   // r4: exception pc
3066   // Jump to handler
3067 
3068   __ br(r8);
3069 
3070   // Make sure all code is generated
3071   masm->flush();
3072 
3073   // Set exception blob
3074   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3075 }
3076 #endif // COMPILER2_OR_JVMCI