1 /*
   2  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interp_masm.hpp"
  34 #include "oops/compiledICHolder.hpp"
  35 #include "prims/jvmtiRedefineClassesTrace.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/vframeArray.hpp"
  38 #include "vmreg_aarch64.inline.hpp"
  39 #ifdef COMPILER1
  40 #include "c1/c1_Runtime1.hpp"
  41 #endif
  42 #ifdef COMPILER2
  43 #include "adfiles/ad_aarch64.hpp"
  44 #include "opto/runtime.hpp"
  45 #endif
  46 
  47 #ifdef BUILTIN_SIM
  48 #include "../../../../../../simulator/simulator.hpp"
  49 #endif
  50 
  51 #define __ masm->
  52 
  53 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  54 
  55 class SimpleRuntimeFrame {
  56 
  57   public:
  58 
  59   // Most of the runtime stubs have this simple frame layout.
  60   // This class exists to make the layout shared in one place.
  61   // Offsets are for compiler stack slots, which are jints.
  62   enum layout {
  63     // The frame sender code expects that rbp will be in the "natural" place and
  64     // will override any oopMap setting for it. We must therefore force the layout
  65     // so that it agrees with the frame sender code.
  66     // we don't expect any arg reg save area so aarch64 asserts that
  67     // frame::arg_reg_save_area_bytes == 0
  68     rbp_off = 0,
  69     rbp_off2,
  70     return_off, return_off2,
  71     framesize
  72   };
  73 };
  74 
  75 // FIXME -- this is used by C1
  76 class RegisterSaver {
  77  public:
  78   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  79   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
  80 
  81   // Offsets into the register save area
  82   // Used by deoptimization when it is managing result register
  83   // values on its own
  84 
  85   static int r0_offset_in_bytes(void)    { return (32 + r0->encoding()) * wordSize; }
  86   static int reg_offset_in_bytes(Register r)    { return r0_offset_in_bytes() + r->encoding() * wordSize; }
  87   static int rmethod_offset_in_bytes(void)    { return reg_offset_in_bytes(rmethod); }
  88   static int rscratch1_offset_in_bytes(void)    { return (32 + rscratch1->encoding()) * wordSize; }
  89   static int v0_offset_in_bytes(void)   { return 0; }
  90   static int return_offset_in_bytes(void) { return (32 /* floats*/ + 31 /* gregs*/) * wordSize; }
  91 
  92   // During deoptimization only the result registers need to be restored,
  93   // all the other values have already been extracted.
  94   static void restore_result_registers(MacroAssembler* masm);
  95 
  96     // Capture info about frame layout
  97   enum layout {
  98                 fpu_state_off = 0,
  99                 fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
 100                 // The frame sender code expects that rfp will be in
 101                 // the "natural" place and will override any oopMap
 102                 // setting for it. We must therefore force the layout
 103                 // so that it agrees with the frame sender code.
 104                 r0_off = fpu_state_off+FPUStateSizeInWords,
 105                 rfp_off = r0_off + 30 * 2,
 106                 return_off = rfp_off + 2,      // slot for return address
 107                 reg_save_size = return_off + 2};
 108 
 109 };
 110 
 111 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
 112 #ifdef COMPILER2
 113   if (save_vectors) {
 114     // Save upper half of vector registers
 115     int vect_words = 32 * 8 / wordSize;
 116     additional_frame_words += vect_words;
 117   }
 118 #else
 119   assert(!save_vectors, "vectors are generated only by C2");
 120 #endif
 121 
 122   int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
 123                                      reg_save_size*BytesPerInt, 16);
 124   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 125   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 126   // The caller will allocate additional_frame_words
 127   int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
 128   // CodeBlob frame size is in words.
 129   int frame_size_in_words = frame_size_in_bytes / wordSize;
 130   *total_frame_words = frame_size_in_words;
 131 
 132   // Save registers, fpu state, and flags.
 133 
 134   __ enter();
 135   __ push_CPU_state(save_vectors);
 136 
 137   // Set an oopmap for the call site.  This oopmap will map all
 138   // oop-registers and debug-info registers as callee-saved.  This
 139   // will allow deoptimization at this safepoint to find all possible
 140   // debug-info recordings, as well as let GC find all oops.
 141 
 142   OopMapSet *oop_maps = new OopMapSet();
 143   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
 144 
 145   for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
 146     Register r = as_Register(i);
 147     if (r < rheapbase && r != rscratch1 && r != rscratch2) {
 148       int sp_offset = 2 * (i + 32); // SP offsets are in 4-byte words,
 149                                     // register slots are 8 bytes
 150                                     // wide, 32 floating-point
 151                                     // registers
 152       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
 153                                 r->as_VMReg());
 154     }
 155   }
 156 
 157   for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
 158     FloatRegister r = as_FloatRegister(i);
 159     int sp_offset = save_vectors ? (4 * i) : (2 * i);
 160     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
 161                               r->as_VMReg());
 162   }
 163 
 164   return oop_map;
 165 }
 166 
 167 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 168 #ifndef COMPILER2
 169   assert(!restore_vectors, "vectors are generated only by C2");
 170 #endif
 171   __ pop_CPU_state(restore_vectors);
 172   __ leave();
 173 }
 174 
 175 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 176 
 177   // Just restore result register. Only used by deoptimization. By
 178   // now any callee save register that needs to be restored to a c2
 179   // caller of the deoptee has been extracted into the vframeArray
 180   // and will be stuffed into the c2i adapter we create for later
 181   // restoration so only result registers need to be restored here.
 182 
 183   // Restore fp result register
 184   __ ldrd(v0, Address(sp, v0_offset_in_bytes()));
 185   // Restore integer result register
 186   __ ldr(r0, Address(sp, r0_offset_in_bytes()));
 187 
 188   // Pop all of the register save are off the stack
 189   __ add(sp, sp, round_to(return_offset_in_bytes(), 16));
 190 }
 191 
 192 // Is vector's size (in bytes) bigger than a size saved by default?
 193 // 8 bytes vector registers are saved by default on AArch64.
 194 bool SharedRuntime::is_wide_vector(int size) {
 195   return size > 8;
 196 }
 197 // The java_calling_convention describes stack locations as ideal slots on
 198 // a frame with no abi restrictions. Since we must observe abi restrictions
 199 // (like the placement of the register window) the slots must be biased by
 200 // the following value.
 201 static int reg2offset_in(VMReg r) {
 202   // Account for saved rfp and lr
 203   // This should really be in_preserve_stack_slots
 204   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
 205 }
 206 
 207 static int reg2offset_out(VMReg r) {
 208   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 209 }
 210 
 211 template <class T> static const T& min (const T& a, const T& b) {
 212   return (a > b) ? b : a;
 213 }
 214 
 215 // ---------------------------------------------------------------------------
 216 // Read the array of BasicTypes from a signature, and compute where the
 217 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 218 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 219 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 220 // as framesizes are fixed.
 221 // VMRegImpl::stack0 refers to the first slot 0(sp).
 222 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.  Register
 223 // up to RegisterImpl::number_of_registers) are the 64-bit
 224 // integer registers.
 225 
 226 // Note: the INPUTS in sig_bt are in units of Java argument words,
 227 // which are 64-bit.  The OUTPUTS are in 32-bit units.
 228 
 229 // The Java calling convention is a "shifted" version of the C ABI.
 230 // By skipping the first C ABI register we can call non-static jni
 231 // methods with small numbers of arguments without having to shuffle
 232 // the arguments at all. Since we control the java ABI we ought to at
 233 // least get some advantage out of it.
 234 
 235 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 236                                            VMRegPair *regs,
 237                                            int total_args_passed,
 238                                            int is_outgoing) {
 239 
 240   // Create the mapping between argument positions and
 241   // registers.
 242   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
 243     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
 244   };
 245   static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
 246     j_farg0, j_farg1, j_farg2, j_farg3,
 247     j_farg4, j_farg5, j_farg6, j_farg7
 248   };
 249 
 250 
 251   uint int_args = 0;
 252   uint fp_args = 0;
 253   uint stk_args = 0; // inc by 2 each time
 254 
 255   for (int i = 0; i < total_args_passed; i++) {
 256     switch (sig_bt[i]) {
 257     case T_BOOLEAN:
 258     case T_CHAR:
 259     case T_BYTE:
 260     case T_SHORT:
 261     case T_INT:
 262       if (int_args < Argument::n_int_register_parameters_j) {
 263         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 264       } else {
 265         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 266         stk_args += 2;
 267       }
 268       break;
 269     case T_VOID:
 270       // halves of T_LONG or T_DOUBLE
 271       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 272       regs[i].set_bad();
 273       break;
 274     case T_LONG:
 275       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 276       // fall through
 277     case T_OBJECT:
 278     case T_ARRAY:
 279     case T_ADDRESS:
 280       if (int_args < Argument::n_int_register_parameters_j) {
 281         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 282       } else {
 283         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 284         stk_args += 2;
 285       }
 286       break;
 287     case T_FLOAT:
 288       if (fp_args < Argument::n_float_register_parameters_j) {
 289         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 290       } else {
 291         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 292         stk_args += 2;
 293       }
 294       break;
 295     case T_DOUBLE:
 296       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 297       if (fp_args < Argument::n_float_register_parameters_j) {
 298         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 299       } else {
 300         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 301         stk_args += 2;
 302       }
 303       break;
 304     default:
 305       ShouldNotReachHere();
 306       break;
 307     }
 308   }
 309 
 310   return round_to(stk_args, 2);
 311 }
 312 
 313 // Patch the callers callsite with entry to compiled code if it exists.
 314 static void patch_callers_callsite(MacroAssembler *masm) {
 315   Label L;
 316   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 317   __ cbz(rscratch1, L);
 318 
 319   __ enter();
 320   __ push_CPU_state();
 321 
 322   // VM needs caller's callsite
 323   // VM needs target method
 324   // This needs to be a long call since we will relocate this adapter to
 325   // the codeBuffer and it may not reach
 326 
 327 #ifndef PRODUCT
 328   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 329 #endif
 330 
 331   __ mov(c_rarg0, rmethod);
 332   __ mov(c_rarg1, lr);
 333   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 334   __ blrt(rscratch1, 2, 0, 0);
 335   __ maybe_isb();
 336 
 337   __ pop_CPU_state();
 338   // restore sp
 339   __ leave();
 340   __ bind(L);
 341 }
 342 
 343 static void gen_c2i_adapter(MacroAssembler *masm,
 344                             int total_args_passed,
 345                             int comp_args_on_stack,
 346                             const BasicType *sig_bt,
 347                             const VMRegPair *regs,
 348                             Label& skip_fixup) {
 349   // Before we get into the guts of the C2I adapter, see if we should be here
 350   // at all.  We've come from compiled code and are attempting to jump to the
 351   // interpreter, which means the caller made a static call to get here
 352   // (vcalls always get a compiled target if there is one).  Check for a
 353   // compiled target.  If there is one, we need to patch the caller's call.
 354   patch_callers_callsite(masm);
 355 
 356   __ bind(skip_fixup);
 357 
 358   int words_pushed = 0;
 359 
 360   // Since all args are passed on the stack, total_args_passed *
 361   // Interpreter::stackElementSize is the space we need.
 362 
 363   int extraspace = total_args_passed * Interpreter::stackElementSize;
 364 
 365   __ mov(r13, sp);
 366 
 367   // stack is aligned, keep it that way
 368   extraspace = round_to(extraspace, 2*wordSize);
 369 
 370   if (extraspace)
 371     __ sub(sp, sp, extraspace);
 372 
 373   // Now write the args into the outgoing interpreter space
 374   for (int i = 0; i < total_args_passed; i++) {
 375     if (sig_bt[i] == T_VOID) {
 376       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 377       continue;
 378     }
 379 
 380     // offset to start parameters
 381     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 382     int next_off = st_off - Interpreter::stackElementSize;
 383 
 384     // Say 4 args:
 385     // i   st_off
 386     // 0   32 T_LONG
 387     // 1   24 T_VOID
 388     // 2   16 T_OBJECT
 389     // 3    8 T_BOOL
 390     // -    0 return address
 391     //
 392     // However to make thing extra confusing. Because we can fit a long/double in
 393     // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter
 394     // leaves one slot empty and only stores to a single slot. In this case the
 395     // slot that is occupied is the T_VOID slot. See I said it was confusing.
 396 
 397     VMReg r_1 = regs[i].first();
 398     VMReg r_2 = regs[i].second();
 399     if (!r_1->is_valid()) {
 400       assert(!r_2->is_valid(), "");
 401       continue;
 402     }
 403     if (r_1->is_stack()) {
 404       // memory to memory use rscratch1
 405       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 406                     + extraspace
 407                     + words_pushed * wordSize);
 408       if (!r_2->is_valid()) {
 409         // sign extend??
 410         __ ldrw(rscratch1, Address(sp, ld_off));
 411         __ str(rscratch1, Address(sp, st_off));
 412 
 413       } else {
 414 
 415         __ ldr(rscratch1, Address(sp, ld_off));
 416 
 417         // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 418         // T_DOUBLE and T_LONG use two slots in the interpreter
 419         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 420           // ld_off == LSW, ld_off+wordSize == MSW
 421           // st_off == MSW, next_off == LSW
 422           __ str(rscratch1, Address(sp, next_off));
 423 #ifdef ASSERT
 424           // Overwrite the unused slot with known junk
 425           __ mov(rscratch1, 0xdeadffffdeadaaaaul);
 426           __ str(rscratch1, Address(sp, st_off));
 427 #endif /* ASSERT */
 428         } else {
 429           __ str(rscratch1, Address(sp, st_off));
 430         }
 431       }
 432     } else if (r_1->is_Register()) {
 433       Register r = r_1->as_Register();
 434       if (!r_2->is_valid()) {
 435         // must be only an int (or less ) so move only 32bits to slot
 436         // why not sign extend??
 437         __ str(r, Address(sp, st_off));
 438       } else {
 439         // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 440         // T_DOUBLE and T_LONG use two slots in the interpreter
 441         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 442           // long/double in gpr
 443 #ifdef ASSERT
 444           // Overwrite the unused slot with known junk
 445           __ mov(rscratch1, 0xdeadffffdeadaaabul);
 446           __ str(rscratch1, Address(sp, st_off));
 447 #endif /* ASSERT */
 448           __ str(r, Address(sp, next_off));
 449         } else {
 450           __ str(r, Address(sp, st_off));
 451         }
 452       }
 453     } else {
 454       assert(r_1->is_FloatRegister(), "");
 455       if (!r_2->is_valid()) {
 456         // only a float use just part of the slot
 457         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 458       } else {
 459 #ifdef ASSERT
 460         // Overwrite the unused slot with known junk
 461         __ mov(rscratch1, 0xdeadffffdeadaaacul);
 462         __ str(rscratch1, Address(sp, st_off));
 463 #endif /* ASSERT */
 464         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 465       }
 466     }
 467   }
 468 
 469   __ mov(esp, sp); // Interp expects args on caller's expression stack
 470 
 471   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 472   __ br(rscratch1);
 473 }
 474 
 475 
 476 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 477                                     int total_args_passed,
 478                                     int comp_args_on_stack,
 479                                     const BasicType *sig_bt,
 480                                     const VMRegPair *regs) {
 481 
 482   // Note: r13 contains the senderSP on entry. We must preserve it since
 483   // we may do a i2c -> c2i transition if we lose a race where compiled
 484   // code goes non-entrant while we get args ready.
 485 
 486   // In addition we use r13 to locate all the interpreter args because
 487   // we must align the stack to 16 bytes.
 488 
 489   // Adapters are frameless.
 490 
 491   // An i2c adapter is frameless because the *caller* frame, which is
 492   // interpreted, routinely repairs its own esp (from
 493   // interpreter_frame_last_sp), even if a callee has modified the
 494   // stack pointer.  It also recalculates and aligns sp.
 495 
 496   // A c2i adapter is frameless because the *callee* frame, which is
 497   // interpreted, routinely repairs its caller's sp (from sender_sp,
 498   // which is set up via the senderSP register).
 499 
 500   // In other words, if *either* the caller or callee is interpreted, we can
 501   // get the stack pointer repaired after a call.
 502 
 503   // This is why c2i and i2c adapters cannot be indefinitely composed.
 504   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 505   // both caller and callee would be compiled methods, and neither would
 506   // clean up the stack pointer changes performed by the two adapters.
 507   // If this happens, control eventually transfers back to the compiled
 508   // caller, but with an uncorrected stack, causing delayed havoc.
 509 
 510   if (VerifyAdapterCalls &&
 511       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 512 #if 0
 513     // So, let's test for cascading c2i/i2c adapters right now.
 514     //  assert(Interpreter::contains($return_addr) ||
 515     //         StubRoutines::contains($return_addr),
 516     //         "i2c adapter must return to an interpreter frame");
 517     __ block_comment("verify_i2c { ");
 518     Label L_ok;
 519     if (Interpreter::code() != NULL)
 520       range_check(masm, rax, r11,
 521                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 522                   L_ok);
 523     if (StubRoutines::code1() != NULL)
 524       range_check(masm, rax, r11,
 525                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 526                   L_ok);
 527     if (StubRoutines::code2() != NULL)
 528       range_check(masm, rax, r11,
 529                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 530                   L_ok);
 531     const char* msg = "i2c adapter must return to an interpreter frame";
 532     __ block_comment(msg);
 533     __ stop(msg);
 534     __ bind(L_ok);
 535     __ block_comment("} verify_i2ce ");
 536 #endif
 537   }
 538 
 539   // Cut-out for having no stack args.
 540   int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 541   if (comp_args_on_stack) {
 542     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 543     __ andr(sp, rscratch1, -16);
 544   }
 545 
 546   // Will jump to the compiled code just as if compiled code was doing it.
 547   // Pre-load the register-jump target early, to schedule it better.
 548   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 549 
 550   // Now generate the shuffle code.
 551   for (int i = 0; i < total_args_passed; i++) {
 552     if (sig_bt[i] == T_VOID) {
 553       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 554       continue;
 555     }
 556 
 557     // Pick up 0, 1 or 2 words from SP+offset.
 558 
 559     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 560             "scrambled load targets?");
 561     // Load in argument order going down.
 562     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 563     // Point to interpreter value (vs. tag)
 564     int next_off = ld_off - Interpreter::stackElementSize;
 565     //
 566     //
 567     //
 568     VMReg r_1 = regs[i].first();
 569     VMReg r_2 = regs[i].second();
 570     if (!r_1->is_valid()) {
 571       assert(!r_2->is_valid(), "");
 572       continue;
 573     }
 574     if (r_1->is_stack()) {
 575       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 576       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 577       if (!r_2->is_valid()) {
 578         // sign extend???
 579         __ ldrsw(rscratch2, Address(esp, ld_off));
 580         __ str(rscratch2, Address(sp, st_off));
 581       } else {
 582         //
 583         // We are using two optoregs. This can be either T_OBJECT,
 584         // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
 585         // two slots but only uses one for thr T_LONG or T_DOUBLE case
 586         // So we must adjust where to pick up the data to match the
 587         // interpreter.
 588         //
 589         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 590         // are accessed as negative so LSW is at LOW address
 591 
 592         // ld_off is MSW so get LSW
 593         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 594                            next_off : ld_off;
 595         __ ldr(rscratch2, Address(esp, offset));
 596         // st_off is LSW (i.e. reg.first())
 597         __ str(rscratch2, Address(sp, st_off));
 598       }
 599     } else if (r_1->is_Register()) {  // Register argument
 600       Register r = r_1->as_Register();
 601       if (r_2->is_valid()) {
 602         //
 603         // We are using two VMRegs. This can be either T_OBJECT,
 604         // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
 605         // two slots but only uses one for thr T_LONG or T_DOUBLE case
 606         // So we must adjust where to pick up the data to match the
 607         // interpreter.
 608 
 609         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 610                            next_off : ld_off;
 611 
 612         // this can be a misaligned move
 613         __ ldr(r, Address(esp, offset));
 614       } else {
 615         // sign extend and use a full word?
 616         __ ldrw(r, Address(esp, ld_off));
 617       }
 618     } else {
 619       if (!r_2->is_valid()) {
 620         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 621       } else {
 622         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 623       }
 624     }
 625   }
 626 
 627   // 6243940 We might end up in handle_wrong_method if
 628   // the callee is deoptimized as we race thru here. If that
 629   // happens we don't want to take a safepoint because the
 630   // caller frame will look interpreted and arguments are now
 631   // "compiled" so it is much better to make this transition
 632   // invisible to the stack walking code. Unfortunately if
 633   // we try and find the callee by normal means a safepoint
 634   // is possible. So we stash the desired callee in the thread
 635   // and the vm will find there should this case occur.
 636 
 637   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 638 
 639   __ br(rscratch1);
 640 }
 641 
 642 #ifdef BUILTIN_SIM
 643 static void generate_i2c_adapter_name(char *result, int total_args_passed, const BasicType *sig_bt)
 644 {
 645   strcpy(result, "i2c(");
 646   int idx = 4;
 647   for (int i = 0; i < total_args_passed; i++) {
 648     switch(sig_bt[i]) {
 649     case T_BOOLEAN:
 650       result[idx++] = 'Z';
 651       break;
 652     case T_CHAR:
 653       result[idx++] = 'C';
 654       break;
 655     case T_FLOAT:
 656       result[idx++] = 'F';
 657       break;
 658     case T_DOUBLE:
 659       assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
 660              "double must be followed by void");
 661       i++;
 662       result[idx++] = 'D';
 663       break;
 664     case T_BYTE:
 665       result[idx++] = 'B';
 666       break;
 667     case T_SHORT:
 668       result[idx++] = 'S';
 669       break;
 670     case T_INT:
 671       result[idx++] = 'I';
 672       break;
 673     case T_LONG:
 674       assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
 675              "long must be followed by void");
 676       i++;
 677       result[idx++] = 'L';
 678       break;
 679     case T_OBJECT:
 680       result[idx++] = 'O';
 681       break;
 682     case T_ARRAY:
 683       result[idx++] = '[';
 684       break;
 685     case T_ADDRESS:
 686       result[idx++] = 'P';
 687       break;
 688     case T_NARROWOOP:
 689       result[idx++] = 'N';
 690       break;
 691     case T_METADATA:
 692       result[idx++] = 'M';
 693       break;
 694     case T_NARROWKLASS:
 695       result[idx++] = 'K';
 696       break;
 697     default:
 698       result[idx++] = '?';
 699       break;
 700     }
 701   }
 702   result[idx++] = ')';
 703   result[idx] = '\0';
 704 }
 705 #endif
 706 
 707 // ---------------------------------------------------------------
 708 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 709                                                             int total_args_passed,
 710                                                             int comp_args_on_stack,
 711                                                             const BasicType *sig_bt,
 712                                                             const VMRegPair *regs,
 713                                                             AdapterFingerPrint* fingerprint) {
 714   address i2c_entry = __ pc();
 715 #ifdef BUILTIN_SIM
 716   char *name = NULL;
 717   AArch64Simulator *sim = NULL;
 718   size_t len = 65536;
 719   if (NotifySimulator) {
 720     name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
 721   }
 722 
 723   if (name) {
 724     generate_i2c_adapter_name(name, total_args_passed, sig_bt);
 725     sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
 726     sim->notifyCompile(name, i2c_entry);
 727   }
 728 #endif
 729   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 730 
 731   address c2i_unverified_entry = __ pc();
 732   Label skip_fixup;
 733 
 734   Label ok;
 735 
 736   Register holder = rscratch2;
 737   Register receiver = j_rarg0;
 738   Register tmp = r10;  // A call-clobbered register not used for arg passing
 739 
 740   // -------------------------------------------------------------------------
 741   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 742   // to the interpreter.  The args start out packed in the compiled layout.  They
 743   // need to be unpacked into the interpreter layout.  This will almost always
 744   // require some stack space.  We grow the current (compiled) stack, then repack
 745   // the args.  We  finally end in a jump to the generic interpreter entry point.
 746   // On exit from the interpreter, the interpreter will restore our SP (lest the
 747   // compiled code, which relys solely on SP and not FP, get sick).
 748 
 749   {
 750     __ block_comment("c2i_unverified_entry {");
 751     __ load_klass(rscratch1, receiver);
 752     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
 753     __ cmp(rscratch1, tmp);
 754     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_method_offset()));
 755     __ br(Assembler::EQ, ok);
 756     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 757 
 758     __ bind(ok);
 759     // Method might have been compiled since the call site was patched to
 760     // interpreted; if that is the case treat it as a miss so we can get
 761     // the call site corrected.
 762     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 763     __ cbz(rscratch1, skip_fixup);
 764     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 765     __ block_comment("} c2i_unverified_entry");
 766   }
 767 
 768   address c2i_entry = __ pc();
 769 
 770 #ifdef BUILTIN_SIM
 771   if (name) {
 772     name[0] = 'c';
 773     name[2] = 'i';
 774     sim->notifyCompile(name, c2i_entry);
 775     FREE_C_HEAP_ARRAY(char, name, mtInternal);
 776   }
 777 #endif
 778 
 779   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 780 
 781   __ flush();
 782   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 783 }
 784 
 785 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 786                                          VMRegPair *regs,
 787                                          VMRegPair *regs2,
 788                                          int total_args_passed) {
 789   assert(regs2 == NULL, "not needed on AArch64");
 790 
 791 // We return the amount of VMRegImpl stack slots we need to reserve for all
 792 // the arguments NOT counting out_preserve_stack_slots.
 793 
 794     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 795       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 796     };
 797     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 798       c_farg0, c_farg1, c_farg2, c_farg3,
 799       c_farg4, c_farg5, c_farg6, c_farg7
 800     };
 801 
 802     uint int_args = 0;
 803     uint fp_args = 0;
 804     uint stk_args = 0; // inc by 2 each time
 805 
 806     for (int i = 0; i < total_args_passed; i++) {
 807       switch (sig_bt[i]) {
 808       case T_BOOLEAN:
 809       case T_CHAR:
 810       case T_BYTE:
 811       case T_SHORT:
 812       case T_INT:
 813         if (int_args < Argument::n_int_register_parameters_c) {
 814           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 815         } else {
 816           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 817           stk_args += 2;
 818         }
 819         break;
 820       case T_LONG:
 821         assert(sig_bt[i + 1] == T_VOID, "expecting half");
 822         // fall through
 823       case T_OBJECT:
 824       case T_ARRAY:
 825       case T_ADDRESS:
 826       case T_METADATA:
 827         if (int_args < Argument::n_int_register_parameters_c) {
 828           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 829         } else {
 830           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 831           stk_args += 2;
 832         }
 833         break;
 834       case T_FLOAT:
 835         if (fp_args < Argument::n_float_register_parameters_c) {
 836           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 837         } else {
 838           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 839           stk_args += 2;
 840         }
 841         break;
 842       case T_DOUBLE:
 843         assert(sig_bt[i + 1] == T_VOID, "expecting half");
 844         if (fp_args < Argument::n_float_register_parameters_c) {
 845           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 846         } else {
 847           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 848           stk_args += 2;
 849         }
 850         break;
 851       case T_VOID: // Halves of longs and doubles
 852         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 853         regs[i].set_bad();
 854         break;
 855       default:
 856         ShouldNotReachHere();
 857         break;
 858       }
 859     }
 860 
 861   return stk_args;
 862 }
 863 
 864 // On 64 bit we will store integer like items to the stack as
 865 // 64 bits items (sparc abi) even though java would only store
 866 // 32bits for a parameter. On 32bit it will simply be 32 bits
 867 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
 868 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
 869   if (src.first()->is_stack()) {
 870     if (dst.first()->is_stack()) {
 871       // stack to stack
 872       __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
 873       __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
 874     } else {
 875       // stack to reg
 876       __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
 877     }
 878   } else if (dst.first()->is_stack()) {
 879     // reg to stack
 880     // Do we really have to sign extend???
 881     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
 882     __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
 883   } else {
 884     if (dst.first() != src.first()) {
 885       __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
 886     }
 887   }
 888 }
 889 
 890 // An oop arg. Must pass a handle not the oop itself
 891 static void object_move(MacroAssembler* masm,
 892                         OopMap* map,
 893                         int oop_handle_offset,
 894                         int framesize_in_slots,
 895                         VMRegPair src,
 896                         VMRegPair dst,
 897                         bool is_receiver,
 898                         int* receiver_offset) {
 899 
 900   // must pass a handle. First figure out the location we use as a handle
 901 
 902   Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
 903 
 904   // See if oop is NULL if it is we need no handle
 905 
 906   if (src.first()->is_stack()) {
 907 
 908     // Oop is already on the stack as an argument
 909     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 910     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
 911     if (is_receiver) {
 912       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
 913     }
 914 
 915     __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
 916     __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
 917     // conditionally move a NULL
 918     __ cmp(rscratch1, zr);
 919     __ csel(rHandle, zr, rHandle, Assembler::EQ);
 920   } else {
 921 
 922     // Oop is in an a register we must store it to the space we reserve
 923     // on the stack for oop_handles and pass a handle if oop is non-NULL
 924 
 925     const Register rOop = src.first()->as_Register();
 926     int oop_slot;
 927     if (rOop == j_rarg0)
 928       oop_slot = 0;
 929     else if (rOop == j_rarg1)
 930       oop_slot = 1;
 931     else if (rOop == j_rarg2)
 932       oop_slot = 2;
 933     else if (rOop == j_rarg3)
 934       oop_slot = 3;
 935     else if (rOop == j_rarg4)
 936       oop_slot = 4;
 937     else if (rOop == j_rarg5)
 938       oop_slot = 5;
 939     else if (rOop == j_rarg6)
 940       oop_slot = 6;
 941     else {
 942       assert(rOop == j_rarg7, "wrong register");
 943       oop_slot = 7;
 944     }
 945 
 946     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
 947     int offset = oop_slot*VMRegImpl::stack_slot_size;
 948 
 949     map->set_oop(VMRegImpl::stack2reg(oop_slot));
 950     // Store oop in handle area, may be NULL
 951     __ str(rOop, Address(sp, offset));
 952     if (is_receiver) {
 953       *receiver_offset = offset;
 954     }
 955 
 956     __ cmp(rOop, zr);
 957     __ lea(rHandle, Address(sp, offset));
 958     // conditionally move a NULL
 959     __ csel(rHandle, zr, rHandle, Assembler::EQ);
 960   }
 961 
 962   // If arg is on the stack then place it otherwise it is already in correct reg.
 963   if (dst.first()->is_stack()) {
 964     __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
 965   }
 966 }
 967 
 968 // A float arg may have to do float reg int reg conversion
 969 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
 970   if (src.first() != dst.first()) {
 971     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
 972       __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
 973     else
 974       ShouldNotReachHere();
 975   }
 976 }
 977 
 978 // A long move
 979 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
 980   if (src.first()->is_stack()) {
 981     if (dst.first()->is_stack()) {
 982       // stack to stack
 983       __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
 984       __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
 985     } else {
 986       // stack to reg
 987       __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
 988     }
 989   } else if (dst.first()->is_stack()) {
 990     // reg to stack
 991     // Do we really have to sign extend???
 992     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
 993     __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
 994   } else {
 995     if (dst.first() != src.first()) {
 996       __ mov(dst.first()->as_Register(), src.first()->as_Register());
 997     }
 998   }
 999 }
1000 
1001 
1002 // A double move
1003 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1004   if (src.first() != dst.first()) {
1005     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
1006       __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1007     else
1008       ShouldNotReachHere();
1009   }
1010 }
1011 
1012 
1013 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1014   // We always ignore the frame_slots arg and just use the space just below frame pointer
1015   // which by this time is free to use
1016   switch (ret_type) {
1017   case T_FLOAT:
1018     __ strs(v0, Address(rfp, -wordSize));
1019     break;
1020   case T_DOUBLE:
1021     __ strd(v0, Address(rfp, -wordSize));
1022     break;
1023   case T_VOID:  break;
1024   default: {
1025     __ str(r0, Address(rfp, -wordSize));
1026     }
1027   }
1028 }
1029 
1030 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1031   // We always ignore the frame_slots arg and just use the space just below frame pointer
1032   // which by this time is free to use
1033   switch (ret_type) {
1034   case T_FLOAT:
1035     __ ldrs(v0, Address(rfp, -wordSize));
1036     break;
1037   case T_DOUBLE:
1038     __ ldrd(v0, Address(rfp, -wordSize));
1039     break;
1040   case T_VOID:  break;
1041   default: {
1042     __ ldr(r0, Address(rfp, -wordSize));
1043     }
1044   }
1045 }
1046 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1047   RegSet x;
1048   for ( int i = first_arg ; i < arg_count ; i++ ) {
1049     if (args[i].first()->is_Register()) {
1050       x = x + args[i].first()->as_Register();
1051     } else if (args[i].first()->is_FloatRegister()) {
1052       __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
1053     }
1054   }
1055   __ push(x, sp);
1056 }
1057 
1058 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1059   RegSet x;
1060   for ( int i = first_arg ; i < arg_count ; i++ ) {
1061     if (args[i].first()->is_Register()) {
1062       x = x + args[i].first()->as_Register();
1063     } else {
1064       ;
1065     }
1066   }
1067   __ pop(x, sp);
1068   for ( int i = first_arg ; i < arg_count ; i++ ) {
1069     if (args[i].first()->is_Register()) {
1070       ;
1071     } else if (args[i].first()->is_FloatRegister()) {
1072       __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
1073     }
1074   }
1075 }
1076 
1077 
1078 // Check GC_locker::needs_gc and enter the runtime if it's true.  This
1079 // keeps a new JNI critical region from starting until a GC has been
1080 // forced.  Save down any oops in registers and describe them in an
1081 // OopMap.
1082 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1083                                                int stack_slots,
1084                                                int total_c_args,
1085                                                int total_in_args,
1086                                                int arg_save_area,
1087                                                OopMapSet* oop_maps,
1088                                                VMRegPair* in_regs,
1089                                                BasicType* in_sig_bt) { Unimplemented(); }
1090 
1091 // Unpack an array argument into a pointer to the body and the length
1092 // if the array is non-null, otherwise pass 0 for both.
1093 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
1094 
1095 
1096 class ComputeMoveOrder: public StackObj {
1097   class MoveOperation: public ResourceObj {
1098     friend class ComputeMoveOrder;
1099    private:
1100     VMRegPair        _src;
1101     VMRegPair        _dst;
1102     int              _src_index;
1103     int              _dst_index;
1104     bool             _processed;
1105     MoveOperation*  _next;
1106     MoveOperation*  _prev;
1107 
1108     static int get_id(VMRegPair r) { Unimplemented(); return 0; }
1109 
1110    public:
1111     MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1112       _src(src)
1113     , _src_index(src_index)
1114     , _dst(dst)
1115     , _dst_index(dst_index)
1116     , _next(NULL)
1117     , _prev(NULL)
1118     , _processed(false) { Unimplemented(); }
1119 
1120     VMRegPair src() const              { Unimplemented(); return _src; }
1121     int src_id() const                 { Unimplemented(); return 0; }
1122     int src_index() const              { Unimplemented(); return 0; }
1123     VMRegPair dst() const              { Unimplemented(); return _src; }
1124     void set_dst(int i, VMRegPair dst) { Unimplemented(); }
1125     int dst_index() const              { Unimplemented(); return 0; }
1126     int dst_id() const                 { Unimplemented(); return 0; }
1127     MoveOperation* next() const        { Unimplemented(); return 0; }
1128     MoveOperation* prev() const        { Unimplemented(); return 0; }
1129     void set_processed()               { Unimplemented(); }
1130     bool is_processed() const          { Unimplemented(); return 0; }
1131 
1132     // insert
1133     void break_cycle(VMRegPair temp_register) { Unimplemented(); }
1134 
1135     void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
1136   };
1137 
1138  private:
1139   GrowableArray<MoveOperation*> edges;
1140 
1141  public:
1142   ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1143                     BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }
1144 
1145   // Collected all the move operations
1146   void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }
1147 
1148   // Walk the edges breaking cycles between moves.  The result list
1149   // can be walked in order to produce the proper set of loads
1150   GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
1151 };
1152 
1153 
1154 static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, int type) {
1155   CodeBlob *cb = CodeCache::find_blob(dest);
1156   if (cb) {
1157     __ far_call(RuntimeAddress(dest));
1158   } else {
1159     assert((unsigned)gpargs < 256, "eek!");
1160     assert((unsigned)fpargs < 32, "eek!");
1161     __ lea(rscratch1, RuntimeAddress(dest));
1162     if (UseBuiltinSim)   __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
1163     __ blrt(rscratch1, rscratch2);
1164     __ maybe_isb();
1165   }
1166 }
1167 
1168 static void verify_oop_args(MacroAssembler* masm,
1169                             methodHandle method,
1170                             const BasicType* sig_bt,
1171                             const VMRegPair* regs) {
1172   Register temp_reg = r19;  // not part of any compiled calling seq
1173   if (VerifyOops) {
1174     for (int i = 0; i < method->size_of_parameters(); i++) {
1175       if (sig_bt[i] == T_OBJECT ||
1176           sig_bt[i] == T_ARRAY) {
1177         VMReg r = regs[i].first();
1178         assert(r->is_valid(), "bad oop arg");
1179         if (r->is_stack()) {
1180           __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1181           __ verify_oop(temp_reg);
1182         } else {
1183           __ verify_oop(r->as_Register());
1184         }
1185       }
1186     }
1187   }
1188 }
1189 
1190 static void gen_special_dispatch(MacroAssembler* masm,
1191                                  methodHandle method,
1192                                  const BasicType* sig_bt,
1193                                  const VMRegPair* regs) {
1194   verify_oop_args(masm, method, sig_bt, regs);
1195   vmIntrinsics::ID iid = method->intrinsic_id();
1196 
1197   // Now write the args into the outgoing interpreter space
1198   bool     has_receiver   = false;
1199   Register receiver_reg   = noreg;
1200   int      member_arg_pos = -1;
1201   Register member_reg     = noreg;
1202   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1203   if (ref_kind != 0) {
1204     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1205     member_reg = r19;  // known to be free at this point
1206     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1207   } else if (iid == vmIntrinsics::_invokeBasic) {
1208     has_receiver = true;
1209   } else {
1210     fatal("unexpected intrinsic id %d", iid);
1211   }
1212 
1213   if (member_reg != noreg) {
1214     // Load the member_arg into register, if necessary.
1215     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1216     VMReg r = regs[member_arg_pos].first();
1217     if (r->is_stack()) {
1218       __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1219     } else {
1220       // no data motion is needed
1221       member_reg = r->as_Register();
1222     }
1223   }
1224 
1225   if (has_receiver) {
1226     // Make sure the receiver is loaded into a register.
1227     assert(method->size_of_parameters() > 0, "oob");
1228     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1229     VMReg r = regs[0].first();
1230     assert(r->is_valid(), "bad receiver arg");
1231     if (r->is_stack()) {
1232       // Porting note:  This assumes that compiled calling conventions always
1233       // pass the receiver oop in a register.  If this is not true on some
1234       // platform, pick a temp and load the receiver from stack.
1235       fatal("receiver always in a register");
1236       receiver_reg = r2;  // known to be free at this point
1237       __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1238     } else {
1239       // no data motion is needed
1240       receiver_reg = r->as_Register();
1241     }
1242   }
1243 
1244   // Figure out which address we are really jumping to:
1245   MethodHandles::generate_method_handle_dispatch(masm, iid,
1246                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1247 }
1248 
1249 // ---------------------------------------------------------------------------
1250 // Generate a native wrapper for a given method.  The method takes arguments
1251 // in the Java compiled code convention, marshals them to the native
1252 // convention (handlizes oops, etc), transitions to native, makes the call,
1253 // returns to java state (possibly blocking), unhandlizes any result and
1254 // returns.
1255 //
1256 // Critical native functions are a shorthand for the use of
1257 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1258 // functions.  The wrapper is expected to unpack the arguments before
1259 // passing them to the callee and perform checks before and after the
1260 // native call to ensure that they GC_locker
1261 // lock_critical/unlock_critical semantics are followed.  Some other
1262 // parts of JNI setup are skipped like the tear down of the JNI handle
1263 // block and the check for pending exceptions it's impossible for them
1264 // to be thrown.
1265 //
1266 // They are roughly structured like this:
1267 //    if (GC_locker::needs_gc())
1268 //      SharedRuntime::block_for_jni_critical();
1269 //    tranistion to thread_in_native
1270 //    unpack arrray arguments and call native entry point
1271 //    check for safepoint in progress
1272 //    check if any thread suspend flags are set
1273 //      call into JVM and possible unlock the JNI critical
1274 //      if a GC was suppressed while in the critical native.
1275 //    transition back to thread_in_Java
1276 //    return to caller
1277 //
1278 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1279                                                 methodHandle method,
1280                                                 int compile_id,
1281                                                 BasicType* in_sig_bt,
1282                                                 VMRegPair* in_regs,
1283                                                 BasicType ret_type) {
1284 #ifdef BUILTIN_SIM
1285   if (NotifySimulator) {
1286     // Names are up to 65536 chars long.  UTF8-coded strings are up to
1287     // 3 bytes per character.  We concatenate three such strings.
1288     // Yes, I know this is ridiculous, but it's debug code and glibc
1289     // allocates large arrays very efficiently.
1290     size_t len = (65536 * 3) * 3;
1291     char *name = new char[len];
1292 
1293     strncpy(name, method()->method_holder()->name()->as_utf8(), len);
1294     strncat(name, ".", len);
1295     strncat(name, method()->name()->as_utf8(), len);
1296     strncat(name, method()->signature()->as_utf8(), len);
1297     AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck)->notifyCompile(name, __ pc());
1298     delete[] name;
1299   }
1300 #endif
1301 
1302   if (method->is_method_handle_intrinsic()) {
1303     vmIntrinsics::ID iid = method->intrinsic_id();
1304     intptr_t start = (intptr_t)__ pc();
1305     int vep_offset = ((intptr_t)__ pc()) - start;
1306 
1307     // First instruction must be a nop as it may need to be patched on deoptimisation
1308     __ nop();
1309     gen_special_dispatch(masm,
1310                          method,
1311                          in_sig_bt,
1312                          in_regs);
1313     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1314     __ flush();
1315     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1316     return nmethod::new_native_nmethod(method,
1317                                        compile_id,
1318                                        masm->code(),
1319                                        vep_offset,
1320                                        frame_complete,
1321                                        stack_slots / VMRegImpl::slots_per_word,
1322                                        in_ByteSize(-1),
1323                                        in_ByteSize(-1),
1324                                        (OopMapSet*)NULL);
1325   }
1326   bool is_critical_native = true;
1327   address native_func = method->critical_native_function();
1328   if (native_func == NULL) {
1329     native_func = method->native_function();
1330     is_critical_native = false;
1331   }
1332   assert(native_func != NULL, "must have function");
1333 
1334   // An OopMap for lock (and class if static)
1335   OopMapSet *oop_maps = new OopMapSet();
1336   intptr_t start = (intptr_t)__ pc();
1337 
1338   // We have received a description of where all the java arg are located
1339   // on entry to the wrapper. We need to convert these args to where
1340   // the jni function will expect them. To figure out where they go
1341   // we convert the java signature to a C signature by inserting
1342   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1343 
1344   const int total_in_args = method->size_of_parameters();
1345   int total_c_args = total_in_args;
1346   if (!is_critical_native) {
1347     total_c_args += 1;
1348     if (method->is_static()) {
1349       total_c_args++;
1350     }
1351   } else {
1352     for (int i = 0; i < total_in_args; i++) {
1353       if (in_sig_bt[i] == T_ARRAY) {
1354         total_c_args++;
1355       }
1356     }
1357   }
1358 
1359   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1360   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1361   BasicType* in_elem_bt = NULL;
1362 
1363   int argc = 0;
1364   if (!is_critical_native) {
1365     out_sig_bt[argc++] = T_ADDRESS;
1366     if (method->is_static()) {
1367       out_sig_bt[argc++] = T_OBJECT;
1368     }
1369 
1370     for (int i = 0; i < total_in_args ; i++ ) {
1371       out_sig_bt[argc++] = in_sig_bt[i];
1372     }
1373   } else {
1374     Thread* THREAD = Thread::current();
1375     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1376     SignatureStream ss(method->signature());
1377     for (int i = 0; i < total_in_args ; i++ ) {
1378       if (in_sig_bt[i] == T_ARRAY) {
1379         // Arrays are passed as int, elem* pair
1380         out_sig_bt[argc++] = T_INT;
1381         out_sig_bt[argc++] = T_ADDRESS;
1382         Symbol* atype = ss.as_symbol(CHECK_NULL);
1383         const char* at = atype->as_C_string();
1384         if (strlen(at) == 2) {
1385           assert(at[0] == '[', "must be");
1386           switch (at[1]) {
1387             case 'B': in_elem_bt[i]  = T_BYTE; break;
1388             case 'C': in_elem_bt[i]  = T_CHAR; break;
1389             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1390             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1391             case 'I': in_elem_bt[i]  = T_INT; break;
1392             case 'J': in_elem_bt[i]  = T_LONG; break;
1393             case 'S': in_elem_bt[i]  = T_SHORT; break;
1394             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1395             default: ShouldNotReachHere();
1396           }
1397         }
1398       } else {
1399         out_sig_bt[argc++] = in_sig_bt[i];
1400         in_elem_bt[i] = T_VOID;
1401       }
1402       if (in_sig_bt[i] != T_VOID) {
1403         assert(in_sig_bt[i] == ss.type(), "must match");
1404         ss.next();
1405       }
1406     }
1407   }
1408 
1409   // Now figure out where the args must be stored and how much stack space
1410   // they require.
1411   int out_arg_slots;
1412   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1413 
1414   // Compute framesize for the wrapper.  We need to handlize all oops in
1415   // incoming registers
1416 
1417   // Calculate the total number of stack slots we will need.
1418 
1419   // First count the abi requirement plus all of the outgoing args
1420   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1421 
1422   // Now the space for the inbound oop handle area
1423   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1424   if (is_critical_native) {
1425     // Critical natives may have to call out so they need a save area
1426     // for register arguments.
1427     int double_slots = 0;
1428     int single_slots = 0;
1429     for ( int i = 0; i < total_in_args; i++) {
1430       if (in_regs[i].first()->is_Register()) {
1431         const Register reg = in_regs[i].first()->as_Register();
1432         switch (in_sig_bt[i]) {
1433           case T_BOOLEAN:
1434           case T_BYTE:
1435           case T_SHORT:
1436           case T_CHAR:
1437           case T_INT:  single_slots++; break;
1438           case T_ARRAY:  // specific to LP64 (7145024)
1439           case T_LONG: double_slots++; break;
1440           default:  ShouldNotReachHere();
1441         }
1442       } else if (in_regs[i].first()->is_FloatRegister()) {
1443         ShouldNotReachHere();
1444       }
1445     }
1446     total_save_slots = double_slots * 2 + single_slots;
1447     // align the save area
1448     if (double_slots != 0) {
1449       stack_slots = round_to(stack_slots, 2);
1450     }
1451   }
1452 
1453   int oop_handle_offset = stack_slots;
1454   stack_slots += total_save_slots;
1455 
1456   // Now any space we need for handlizing a klass if static method
1457 
1458   int klass_slot_offset = 0;
1459   int klass_offset = -1;
1460   int lock_slot_offset = 0;
1461   bool is_static = false;
1462 
1463   if (method->is_static()) {
1464     klass_slot_offset = stack_slots;
1465     stack_slots += VMRegImpl::slots_per_word;
1466     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1467     is_static = true;
1468   }
1469 
1470   // Plus a lock if needed
1471 
1472   if (method->is_synchronized()) {
1473     lock_slot_offset = stack_slots;
1474     stack_slots += VMRegImpl::slots_per_word;
1475   }
1476 
1477   // Now a place (+2) to save return values or temp during shuffling
1478   // + 4 for return address (which we own) and saved rfp
1479   stack_slots += 6;
1480 
1481   // Ok The space we have allocated will look like:
1482   //
1483   //
1484   // FP-> |                     |
1485   //      |---------------------|
1486   //      | 2 slots for moves   |
1487   //      |---------------------|
1488   //      | lock box (if sync)  |
1489   //      |---------------------| <- lock_slot_offset
1490   //      | klass (if static)   |
1491   //      |---------------------| <- klass_slot_offset
1492   //      | oopHandle area      |
1493   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1494   //      | outbound memory     |
1495   //      | based arguments     |
1496   //      |                     |
1497   //      |---------------------|
1498   //      |                     |
1499   // SP-> | out_preserved_slots |
1500   //
1501   //
1502 
1503 
1504   // Now compute actual number of stack words we need rounding to make
1505   // stack properly aligned.
1506   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1507 
1508   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1509 
1510   // First thing make an ic check to see if we should even be here
1511 
1512   // We are free to use all registers as temps without saving them and
1513   // restoring them except rfp. rfp is the only callee save register
1514   // as far as the interpreter and the compiler(s) are concerned.
1515 
1516 
1517   const Register ic_reg = rscratch2;
1518   const Register receiver = j_rarg0;
1519 
1520   Label hit;
1521   Label exception_pending;
1522 
1523   assert_different_registers(ic_reg, receiver, rscratch1);
1524   __ verify_oop(receiver);
1525   __ cmp_klass(receiver, ic_reg, rscratch1);
1526   __ br(Assembler::EQ, hit);
1527 
1528   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1529 
1530   // Verified entry point must be aligned
1531   __ align(8);
1532 
1533   __ bind(hit);
1534 
1535   int vep_offset = ((intptr_t)__ pc()) - start;
1536 
1537   // If we have to make this method not-entrant we'll overwrite its
1538   // first instruction with a jump.  For this action to be legal we
1539   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1540   // SVC, HVC, or SMC.  Make it a NOP.
1541   __ nop();
1542 
1543   // Generate stack overflow check
1544   if (UseStackBanging) {
1545     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1546   } else {
1547     Unimplemented();
1548   }
1549 
1550   // Generate a new frame for the wrapper.
1551   __ enter();
1552   // -2 because return address is already present and so is saved rfp
1553   __ sub(sp, sp, stack_size - 2*wordSize);
1554 
1555   // Frame is now completed as far as size and linkage.
1556   int frame_complete = ((intptr_t)__ pc()) - start;
1557 
1558   // record entry into native wrapper code
1559   if (NotifySimulator) {
1560     __ notify(Assembler::method_entry);
1561   }
1562 
1563   // We use r20 as the oop handle for the receiver/klass
1564   // It is callee save so it survives the call to native
1565 
1566   const Register oop_handle_reg = r20;
1567 
1568   if (is_critical_native) {
1569     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
1570                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1571   }
1572 
1573   //
1574   // We immediately shuffle the arguments so that any vm call we have to
1575   // make from here on out (sync slow path, jvmti, etc.) we will have
1576   // captured the oops from our caller and have a valid oopMap for
1577   // them.
1578 
1579   // -----------------
1580   // The Grand Shuffle
1581 
1582   // The Java calling convention is either equal (linux) or denser (win64) than the
1583   // c calling convention. However the because of the jni_env argument the c calling
1584   // convention always has at least one more (and two for static) arguments than Java.
1585   // Therefore if we move the args from java -> c backwards then we will never have
1586   // a register->register conflict and we don't have to build a dependency graph
1587   // and figure out how to break any cycles.
1588   //
1589 
1590   // Record esp-based slot for receiver on stack for non-static methods
1591   int receiver_offset = -1;
1592 
1593   // This is a trick. We double the stack slots so we can claim
1594   // the oops in the caller's frame. Since we are sure to have
1595   // more args than the caller doubling is enough to make
1596   // sure we can capture all the incoming oop args from the
1597   // caller.
1598   //
1599   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1600 
1601   // Mark location of rfp (someday)
1602   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1603 
1604 
1605   int float_args = 0;
1606   int int_args = 0;
1607 
1608 #ifdef ASSERT
1609   bool reg_destroyed[RegisterImpl::number_of_registers];
1610   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1611   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1612     reg_destroyed[r] = false;
1613   }
1614   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1615     freg_destroyed[f] = false;
1616   }
1617 
1618 #endif /* ASSERT */
1619 
1620   // This may iterate in two different directions depending on the
1621   // kind of native it is.  The reason is that for regular JNI natives
1622   // the incoming and outgoing registers are offset upwards and for
1623   // critical natives they are offset down.
1624   GrowableArray<int> arg_order(2 * total_in_args);
1625   VMRegPair tmp_vmreg;
1626   tmp_vmreg.set1(r19->as_VMReg());
1627 
1628   if (!is_critical_native) {
1629     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1630       arg_order.push(i);
1631       arg_order.push(c_arg);
1632     }
1633   } else {
1634     // Compute a valid move order, using tmp_vmreg to break any cycles
1635     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1636   }
1637 
1638   int temploc = -1;
1639   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1640     int i = arg_order.at(ai);
1641     int c_arg = arg_order.at(ai + 1);
1642     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1643     if (c_arg == -1) {
1644       assert(is_critical_native, "should only be required for critical natives");
1645       // This arg needs to be moved to a temporary
1646       __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1647       in_regs[i] = tmp_vmreg;
1648       temploc = i;
1649       continue;
1650     } else if (i == -1) {
1651       assert(is_critical_native, "should only be required for critical natives");
1652       // Read from the temporary location
1653       assert(temploc != -1, "must be valid");
1654       i = temploc;
1655       temploc = -1;
1656     }
1657 #ifdef ASSERT
1658     if (in_regs[i].first()->is_Register()) {
1659       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1660     } else if (in_regs[i].first()->is_FloatRegister()) {
1661       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1662     }
1663     if (out_regs[c_arg].first()->is_Register()) {
1664       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1665     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1666       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1667     }
1668 #endif /* ASSERT */
1669     switch (in_sig_bt[i]) {
1670       case T_ARRAY:
1671         if (is_critical_native) {
1672           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1673           c_arg++;
1674 #ifdef ASSERT
1675           if (out_regs[c_arg].first()->is_Register()) {
1676             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1677           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1678             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1679           }
1680 #endif
1681           int_args++;
1682           break;
1683         }
1684       case T_OBJECT:
1685         assert(!is_critical_native, "no oop arguments");
1686         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1687                     ((i == 0) && (!is_static)),
1688                     &receiver_offset);
1689         int_args++;
1690         break;
1691       case T_VOID:
1692         break;
1693 
1694       case T_FLOAT:
1695         float_move(masm, in_regs[i], out_regs[c_arg]);
1696         float_args++;
1697         break;
1698 
1699       case T_DOUBLE:
1700         assert( i + 1 < total_in_args &&
1701                 in_sig_bt[i + 1] == T_VOID &&
1702                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1703         double_move(masm, in_regs[i], out_regs[c_arg]);
1704         float_args++;
1705         break;
1706 
1707       case T_LONG :
1708         long_move(masm, in_regs[i], out_regs[c_arg]);
1709         int_args++;
1710         break;
1711 
1712       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1713 
1714       default:
1715         move32_64(masm, in_regs[i], out_regs[c_arg]);
1716         int_args++;
1717     }
1718   }
1719 
1720   // point c_arg at the first arg that is already loaded in case we
1721   // need to spill before we call out
1722   int c_arg = total_c_args - total_in_args;
1723 
1724   // Pre-load a static method's oop into c_rarg1.
1725   if (method->is_static() && !is_critical_native) {
1726 
1727     //  load oop into a register
1728     __ movoop(c_rarg1,
1729               JNIHandles::make_local(method->method_holder()->java_mirror()),
1730               /*immediate*/true);
1731 
1732     // Now handlize the static class mirror it's known not-null.
1733     __ str(c_rarg1, Address(sp, klass_offset));
1734     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1735 
1736     // Now get the handle
1737     __ lea(c_rarg1, Address(sp, klass_offset));
1738     // and protect the arg if we must spill
1739     c_arg--;
1740   }
1741 
1742   // Change state to native (we save the return address in the thread, since it might not
1743   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
1744   // points into the right code segment. It does not have to be the correct return pc.
1745   // We use the same pc/oopMap repeatedly when we call out
1746 
1747   intptr_t the_pc = (intptr_t) __ pc();
1748   oop_maps->add_gc_map(the_pc - start, map);
1749 
1750   __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);
1751 
1752   Label dtrace_method_entry, dtrace_method_entry_done;
1753   {
1754     unsigned long offset;
1755     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1756     __ ldrb(rscratch1, Address(rscratch1, offset));
1757     __ cbnzw(rscratch1, dtrace_method_entry);
1758     __ bind(dtrace_method_entry_done);
1759   }
1760 
1761   // RedefineClasses() tracing support for obsolete method entry
1762   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1763     // protect the args we've loaded
1764     save_args(masm, total_c_args, c_arg, out_regs);
1765     __ mov_metadata(c_rarg1, method());
1766     __ call_VM_leaf(
1767       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1768       rthread, c_rarg1);
1769     restore_args(masm, total_c_args, c_arg, out_regs);
1770   }
1771 
1772   // Lock a synchronized method
1773 
1774   // Register definitions used by locking and unlocking
1775 
1776   const Register swap_reg = r0;
1777   const Register obj_reg  = r19;  // Will contain the oop
1778   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1779   const Register old_hdr  = r13;  // value of old header at unlock time
1780   const Register tmp = lr;
1781 
1782   Label slow_path_lock;
1783   Label lock_done;
1784 
1785   if (method->is_synchronized()) {
1786     assert(!is_critical_native, "unhandled");
1787 
1788     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1789 
1790     // Get the handle (the 2nd argument)
1791     __ mov(oop_handle_reg, c_rarg1);
1792 
1793     // Get address of the box
1794 
1795     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1796 
1797     // Load the oop from the handle
1798     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1799 
1800     if (UseBiasedLocking) {
1801       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1802     }
1803 
1804     // Load (object->mark() | 1) into swap_reg %r0
1805     __ ldr(rscratch1, Address(obj_reg, 0));
1806     __ orr(swap_reg, rscratch1, 1);
1807 
1808     // Save (object->mark() | 1) into BasicLock's displaced header
1809     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1810 
1811     // src -> dest iff dest == r0 else r0 <- dest
1812     { Label here;
1813       __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1814     }
1815 
1816     // Hmm should this move to the slow path code area???
1817 
1818     // Test if the oopMark is an obvious stack pointer, i.e.,
1819     //  1) (mark & 3) == 0, and
1820     //  2) sp <= mark < mark + os::pagesize()
1821     // These 3 tests can be done by evaluating the following
1822     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1823     // assuming both stack pointer and pagesize have their
1824     // least significant 2 bits clear.
1825     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1826 
1827     __ sub(swap_reg, sp, swap_reg);
1828     __ neg(swap_reg, swap_reg);
1829     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1830 
1831     // Save the test result, for recursive case, the result is zero
1832     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1833     __ br(Assembler::NE, slow_path_lock);
1834 
1835     // Slow path will re-enter here
1836 
1837     __ bind(lock_done);
1838   }
1839 
1840 
1841   // Finally just about ready to make the JNI call
1842 
1843   // get JNIEnv* which is first argument to native
1844   if (!is_critical_native) {
1845     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1846   }
1847 
1848   // Now set thread in native
1849   __ mov(rscratch1, _thread_in_native);
1850   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1851   __ stlrw(rscratch1, rscratch2);
1852 
1853   {
1854     int return_type = 0;
1855     switch (ret_type) {
1856     case T_VOID: break;
1857       return_type = 0; break;
1858     case T_CHAR:
1859     case T_BYTE:
1860     case T_SHORT:
1861     case T_INT:
1862     case T_BOOLEAN:
1863     case T_LONG:
1864       return_type = 1; break;
1865     case T_ARRAY:
1866     case T_OBJECT:
1867       return_type = 1; break;
1868     case T_FLOAT:
1869       return_type = 2; break;
1870     case T_DOUBLE:
1871       return_type = 3; break;
1872     default:
1873       ShouldNotReachHere();
1874     }
1875     rt_call(masm, native_func,
1876             int_args + 2, // AArch64 passes up to 8 args in int registers
1877             float_args,   // and up to 8 float args
1878             return_type);
1879   }
1880 
1881   // Unpack native results.
1882   switch (ret_type) {
1883   case T_BOOLEAN: __ ubfx(r0, r0, 0, 8);             break;
1884   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1885   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1886   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1887   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1888   case T_DOUBLE :
1889   case T_FLOAT  :
1890     // Result is in v0 we'll save as needed
1891     break;
1892   case T_ARRAY:                 // Really a handle
1893   case T_OBJECT:                // Really a handle
1894       break; // can't de-handlize until after safepoint check
1895   case T_VOID: break;
1896   case T_LONG: break;
1897   default       : ShouldNotReachHere();
1898   }
1899 
1900   // Switch thread to "native transition" state before reading the synchronization state.
1901   // This additional state is necessary because reading and testing the synchronization
1902   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1903   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1904   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1905   //     Thread A is resumed to finish this native method, but doesn't block here since it
1906   //     didn't see any synchronization is progress, and escapes.
1907   __ mov(rscratch1, _thread_in_native_trans);
1908 
1909   if(os::is_MP()) {
1910     if (UseMembar) {
1911       __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1912 
1913       // Force this write out before the read below
1914       __ dmb(Assembler::SY);
1915     } else {
1916       __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1917       __ stlrw(rscratch1, rscratch2);
1918 
1919       // Write serialization page so VM thread can do a pseudo remote membar.
1920       // We use the current thread pointer to calculate a thread specific
1921       // offset to write to within the page. This minimizes bus traffic
1922       // due to cache line collision.
1923       __ serialize_memory(rthread, r2);
1924     }
1925   }
1926 
1927   // check for safepoint operation in progress and/or pending suspend requests
1928   Label safepoint_in_progress, safepoint_in_progress_done;
1929   {
1930     assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
1931     unsigned long offset;
1932     __ adrp(rscratch1,
1933             ExternalAddress((address)SafepointSynchronize::address_of_state()),
1934             offset);
1935     __ ldrw(rscratch1, Address(rscratch1, offset));
1936     __ cbnzw(rscratch1, safepoint_in_progress);
1937     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1938     __ cbnzw(rscratch1, safepoint_in_progress);
1939     __ bind(safepoint_in_progress_done);
1940   }
1941 
1942   // change thread state
1943   Label after_transition;
1944   __ mov(rscratch1, _thread_in_Java);
1945   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1946   __ stlrw(rscratch1, rscratch2);
1947   __ bind(after_transition);
1948 
1949   Label reguard;
1950   Label reguard_done;
1951   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1952   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_disabled);
1953   __ br(Assembler::EQ, reguard);
1954   __ bind(reguard_done);
1955 
1956   // native result if any is live
1957 
1958   // Unlock
1959   Label unlock_done;
1960   Label slow_path_unlock;
1961   if (method->is_synchronized()) {
1962 
1963     // Get locked oop from the handle we passed to jni
1964     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1965 
1966     Label done;
1967 
1968     if (UseBiasedLocking) {
1969       __ biased_locking_exit(obj_reg, old_hdr, done);
1970     }
1971 
1972     // Simple recursive lock?
1973 
1974     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1975     __ cbz(rscratch1, done);
1976 
1977     // Must save r0 if if it is live now because cmpxchg must use it
1978     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1979       save_native_result(masm, ret_type, stack_slots);
1980     }
1981 
1982 
1983     // get address of the stack lock
1984     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1985     //  get old displaced header
1986     __ ldr(old_hdr, Address(r0, 0));
1987 
1988     // Atomic swap old header if oop still contains the stack lock
1989     Label succeed;
1990     __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1991     __ bind(succeed);
1992 
1993     // slow path re-enters here
1994     __ bind(unlock_done);
1995     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1996       restore_native_result(masm, ret_type, stack_slots);
1997     }
1998 
1999     __ bind(done);
2000   }
2001 
2002   Label dtrace_method_exit, dtrace_method_exit_done;
2003   {
2004     unsigned long offset;
2005     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2006     __ ldrb(rscratch1, Address(rscratch1, offset));
2007     __ cbnzw(rscratch1, dtrace_method_exit);
2008     __ bind(dtrace_method_exit_done);
2009   }
2010 
2011   __ reset_last_Java_frame(false, true);
2012 
2013   // Unpack oop result
2014   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2015       Label L;
2016       __ cbz(r0, L);
2017       __ ldr(r0, Address(r0, 0));
2018       __ bind(L);
2019       __ verify_oop(r0);
2020   }
2021 
2022   if (!is_critical_native) {
2023     // reset handle block
2024     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2025     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
2026   }
2027 
2028   __ leave();
2029 
2030   if (!is_critical_native) {
2031     // Any exception pending?
2032     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2033     __ cbnz(rscratch1, exception_pending);
2034   }
2035 
2036   // record exit from native wrapper code
2037   if (NotifySimulator) {
2038     __ notify(Assembler::method_reentry);
2039   }
2040 
2041   // We're done
2042   __ ret(lr);
2043 
2044   // Unexpected paths are out of line and go here
2045 
2046   if (!is_critical_native) {
2047     // forward the exception
2048     __ bind(exception_pending);
2049 
2050     // and forward the exception
2051     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2052   }
2053 
2054   // Slow path locking & unlocking
2055   if (method->is_synchronized()) {
2056 
2057     __ block_comment("Slow path lock {");
2058     __ bind(slow_path_lock);
2059 
2060     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2061     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2062 
2063     // protect the args we've loaded
2064     save_args(masm, total_c_args, c_arg, out_regs);
2065 
2066     __ mov(c_rarg0, obj_reg);
2067     __ mov(c_rarg1, lock_reg);
2068     __ mov(c_rarg2, rthread);
2069 
2070     // Not a leaf but we have last_Java_frame setup as we want
2071     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2072     restore_args(masm, total_c_args, c_arg, out_regs);
2073 
2074 #ifdef ASSERT
2075     { Label L;
2076       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2077       __ cbz(rscratch1, L);
2078       __ stop("no pending exception allowed on exit from monitorenter");
2079       __ bind(L);
2080     }
2081 #endif
2082     __ b(lock_done);
2083 
2084     __ block_comment("} Slow path lock");
2085 
2086     __ block_comment("Slow path unlock {");
2087     __ bind(slow_path_unlock);
2088 
2089     // If we haven't already saved the native result we must save it now as xmm registers
2090     // are still exposed.
2091 
2092     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2093       save_native_result(masm, ret_type, stack_slots);
2094     }
2095 
2096     __ mov(c_rarg2, rthread);
2097     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2098     __ mov(c_rarg0, obj_reg);
2099 
2100     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2101     // NOTE that obj_reg == r19 currently
2102     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2103     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2104 
2105     rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 3, 0, 1);
2106 
2107 #ifdef ASSERT
2108     {
2109       Label L;
2110       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2111       __ cbz(rscratch1, L);
2112       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2113       __ bind(L);
2114     }
2115 #endif /* ASSERT */
2116 
2117     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2118 
2119     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2120       restore_native_result(masm, ret_type, stack_slots);
2121     }
2122     __ b(unlock_done);
2123 
2124     __ block_comment("} Slow path unlock");
2125 
2126   } // synchronized
2127 
2128   // SLOW PATH Reguard the stack if needed
2129 
2130   __ bind(reguard);
2131   save_native_result(masm, ret_type, stack_slots);
2132   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), 0, 0, 0);
2133   restore_native_result(masm, ret_type, stack_slots);
2134   // and continue
2135   __ b(reguard_done);
2136 
2137   // SLOW PATH safepoint
2138   {
2139     __ block_comment("safepoint {");
2140     __ bind(safepoint_in_progress);
2141 
2142     // Don't use call_VM as it will see a possible pending exception and forward it
2143     // and never return here preventing us from clearing _last_native_pc down below.
2144     //
2145     save_native_result(masm, ret_type, stack_slots);
2146     __ mov(c_rarg0, rthread);
2147 #ifndef PRODUCT
2148   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2149 #endif
2150     if (!is_critical_native) {
2151       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2152     } else {
2153       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2154     }
2155     __ blrt(rscratch1, 1, 0, 1);
2156     __ maybe_isb();
2157     // Restore any method result value
2158     restore_native_result(masm, ret_type, stack_slots);
2159 
2160     if (is_critical_native) {
2161       // The call above performed the transition to thread_in_Java so
2162       // skip the transition logic above.
2163       __ b(after_transition);
2164     }
2165 
2166     __ b(safepoint_in_progress_done);
2167     __ block_comment("} safepoint");
2168   }
2169 
2170   // SLOW PATH dtrace support
2171   {
2172     __ block_comment("dtrace entry {");
2173     __ bind(dtrace_method_entry);
2174 
2175     // We have all of the arguments setup at this point. We must not touch any register
2176     // argument registers at this point (what if we save/restore them there are no oop?
2177 
2178     save_args(masm, total_c_args, c_arg, out_regs);
2179     __ mov_metadata(c_rarg1, method());
2180     __ call_VM_leaf(
2181       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2182       rthread, c_rarg1);
2183     restore_args(masm, total_c_args, c_arg, out_regs);
2184     __ b(dtrace_method_entry_done);
2185     __ block_comment("} dtrace entry");
2186   }
2187 
2188   {
2189     __ block_comment("dtrace exit {");
2190     __ bind(dtrace_method_exit);
2191     save_native_result(masm, ret_type, stack_slots);
2192     __ mov_metadata(c_rarg1, method());
2193     __ call_VM_leaf(
2194          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2195          rthread, c_rarg1);
2196     restore_native_result(masm, ret_type, stack_slots);
2197     __ b(dtrace_method_exit_done);
2198     __ block_comment("} dtrace exit");
2199   }
2200 
2201 
2202   __ flush();
2203 
2204   nmethod *nm = nmethod::new_native_nmethod(method,
2205                                             compile_id,
2206                                             masm->code(),
2207                                             vep_offset,
2208                                             frame_complete,
2209                                             stack_slots / VMRegImpl::slots_per_word,
2210                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2211                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2212                                             oop_maps);
2213 
2214   if (is_critical_native) {
2215     nm->set_lazy_critical_native(true);
2216   }
2217 
2218   return nm;
2219 
2220 }
2221 
2222 // this function returns the adjust size (in number of words) to a c2i adapter
2223 // activation for use during deoptimization
2224 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2225   assert(callee_locals >= callee_parameters,
2226           "test and remove; got more parms than locals");
2227   if (callee_locals < callee_parameters)
2228     return 0;                   // No adjustment for negative locals
2229   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2230   // diff is counted in stack words
2231   return round_to(diff, 2);
2232 }
2233 
2234 
2235 //------------------------------generate_deopt_blob----------------------------
2236 void SharedRuntime::generate_deopt_blob() {
2237   // Allocate space for the code
2238   ResourceMark rm;
2239   // Setup code generation tools
2240   CodeBuffer buffer("deopt_blob", 2048, 1024);
2241   MacroAssembler* masm = new MacroAssembler(&buffer);
2242   int frame_size_in_words;
2243   OopMap* map = NULL;
2244   OopMapSet *oop_maps = new OopMapSet();
2245 
2246 #ifdef BUILTIN_SIM
2247   AArch64Simulator *simulator;
2248   if (NotifySimulator) {
2249     simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
2250     simulator->notifyCompile(const_cast<char*>("SharedRuntime::deopt_blob"), __ pc());
2251   }
2252 #endif
2253 
2254   // -------------
2255   // This code enters when returning to a de-optimized nmethod.  A return
2256   // address has been pushed on the the stack, and return values are in
2257   // registers.
2258   // If we are doing a normal deopt then we were called from the patched
2259   // nmethod from the point we returned to the nmethod. So the return
2260   // address on the stack is wrong by NativeCall::instruction_size
2261   // We will adjust the value so it looks like we have the original return
2262   // address on the stack (like when we eagerly deoptimized).
2263   // In the case of an exception pending when deoptimizing, we enter
2264   // with a return address on the stack that points after the call we patched
2265   // into the exception handler. We have the following register state from,
2266   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2267   //    r0: exception oop
2268   //    r19: exception handler
2269   //    r3: throwing pc
2270   // So in this case we simply jam r3 into the useless return address and
2271   // the stack looks just like we want.
2272   //
2273   // At this point we need to de-opt.  We save the argument return
2274   // registers.  We call the first C routine, fetch_unroll_info().  This
2275   // routine captures the return values and returns a structure which
2276   // describes the current frame size and the sizes of all replacement frames.
2277   // The current frame is compiled code and may contain many inlined
2278   // functions, each with their own JVM state.  We pop the current frame, then
2279   // push all the new frames.  Then we call the C routine unpack_frames() to
2280   // populate these frames.  Finally unpack_frames() returns us the new target
2281   // address.  Notice that callee-save registers are BLOWN here; they have
2282   // already been captured in the vframeArray at the time the return PC was
2283   // patched.
2284   address start = __ pc();
2285   Label cont;
2286 
2287   // Prolog for non exception case!
2288 
2289   // Save everything in sight.
2290   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2291 
2292   // Normal deoptimization.  Save exec mode for unpack_frames.
2293   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2294   __ b(cont);
2295 
2296   int reexecute_offset = __ pc() - start;
2297 
2298   // Reexecute case
2299   // return address is the pc describes what bci to do re-execute at
2300 
2301   // No need to update map as each call to save_live_registers will produce identical oopmap
2302   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2303 
2304   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2305   __ b(cont);
2306 
2307   int exception_offset = __ pc() - start;
2308 
2309   // Prolog for exception case
2310 
2311   // all registers are dead at this entry point, except for r0, and
2312   // r3 which contain the exception oop and exception pc
2313   // respectively.  Set them in TLS and fall thru to the
2314   // unpack_with_exception_in_tls entry point.
2315 
2316   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2317   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2318 
2319   int exception_in_tls_offset = __ pc() - start;
2320 
2321   // new implementation because exception oop is now passed in JavaThread
2322 
2323   // Prolog for exception case
2324   // All registers must be preserved because they might be used by LinearScan
2325   // Exceptiop oop and throwing PC are passed in JavaThread
2326   // tos: stack at point of call to method that threw the exception (i.e. only
2327   // args are on the stack, no return address)
2328 
2329   // The return address pushed by save_live_registers will be patched
2330   // later with the throwing pc. The correct value is not available
2331   // now because loading it from memory would destroy registers.
2332 
2333   // NB: The SP at this point must be the SP of the method that is
2334   // being deoptimized.  Deoptimization assumes that the frame created
2335   // here by save_live_registers is immediately below the method's SP.
2336   // This is a somewhat fragile mechanism.
2337 
2338   // Save everything in sight.
2339   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2340 
2341   // Now it is safe to overwrite any register
2342 
2343   // Deopt during an exception.  Save exec mode for unpack_frames.
2344   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2345 
2346   // load throwing pc from JavaThread and patch it as the return address
2347   // of the current frame. Then clear the field in JavaThread
2348 
2349   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2350   __ str(r3, Address(rfp, wordSize));
2351   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2352 
2353 #ifdef ASSERT
2354   // verify that there is really an exception oop in JavaThread
2355   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2356   __ verify_oop(r0);
2357 
2358   // verify that there is no pending exception
2359   Label no_pending_exception;
2360   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2361   __ cbz(rscratch1, no_pending_exception);
2362   __ stop("must not have pending exception here");
2363   __ bind(no_pending_exception);
2364 #endif
2365 
2366   __ bind(cont);
2367 
2368   // Call C code.  Need thread and this frame, but NOT official VM entry
2369   // crud.  We cannot block on this call, no GC can happen.
2370   //
2371   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2372 
2373   // fetch_unroll_info needs to call last_java_frame().
2374 
2375   Label retaddr;
2376   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2377 #ifdef ASSERT0
2378   { Label L;
2379     __ ldr(rscratch1, Address(rthread,
2380                               JavaThread::last_Java_fp_offset()));
2381     __ cbz(rscratch1, L);
2382     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2383     __ bind(L);
2384   }
2385 #endif // ASSERT
2386   __ mov(c_rarg0, rthread);
2387   __ mov(c_rarg1, rcpool);
2388   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2389   __ blrt(rscratch1, 1, 0, 1);
2390   __ bind(retaddr);
2391 
2392   // Need to have an oopmap that tells fetch_unroll_info where to
2393   // find any register it might need.
2394   oop_maps->add_gc_map(__ pc() - start, map);
2395 
2396   __ reset_last_Java_frame(false, true);
2397 
2398   // Load UnrollBlock* into rdi
2399   __ mov(r5, r0);
2400 
2401   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2402    Label noException;
2403   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2404   __ br(Assembler::NE, noException);
2405   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2406   // QQQ this is useless it was NULL above
2407   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2408   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2409   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2410 
2411   __ verify_oop(r0);
2412 
2413   // Overwrite the result registers with the exception results.
2414   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2415   // I think this is useless
2416   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2417 
2418   __ bind(noException);
2419 
2420   // Only register save data is on the stack.
2421   // Now restore the result registers.  Everything else is either dead
2422   // or captured in the vframeArray.
2423   RegisterSaver::restore_result_registers(masm);
2424 
2425   // All of the register save area has been popped of the stack. Only the
2426   // return address remains.
2427 
2428   // Pop all the frames we must move/replace.
2429   //
2430   // Frame picture (youngest to oldest)
2431   // 1: self-frame (no frame link)
2432   // 2: deopting frame  (no frame link)
2433   // 3: caller of deopting frame (could be compiled/interpreted).
2434   //
2435   // Note: by leaving the return address of self-frame on the stack
2436   // and using the size of frame 2 to adjust the stack
2437   // when we are done the return to frame 3 will still be on the stack.
2438 
2439   // Pop deoptimized frame
2440   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2441   __ sub(r2, r2, 2 * wordSize);
2442   __ add(sp, sp, r2);
2443   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2444   // LR should now be the return address to the caller (3)
2445 
2446 #ifdef ASSERT
2447   // Compilers generate code that bang the stack by as much as the
2448   // interpreter would need. So this stack banging should never
2449   // trigger a fault. Verify that it does not on non product builds.
2450   if (UseStackBanging) {
2451     __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2452     __ bang_stack_size(r19, r2);
2453   }
2454 #endif
2455   // Load address of array of frame pcs into r2
2456   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2457 
2458   // Trash the old pc
2459   // __ addptr(sp, wordSize);  FIXME ????
2460 
2461   // Load address of array of frame sizes into r4
2462   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2463 
2464   // Load counter into r3
2465   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2466 
2467   // Now adjust the caller's stack to make up for the extra locals
2468   // but record the original sp so that we can save it in the skeletal interpreter
2469   // frame and the stack walking of interpreter_sender will get the unextended sp
2470   // value and not the "real" sp value.
2471 
2472   const Register sender_sp = r6;
2473 
2474   __ mov(sender_sp, sp);
2475   __ ldrw(r19, Address(r5,
2476                        Deoptimization::UnrollBlock::
2477                        caller_adjustment_offset_in_bytes()));
2478   __ sub(sp, sp, r19);
2479 
2480   // Push interpreter frames in a loop
2481   __ mov(rscratch1, (address)0xDEADDEAD);        // Make a recognizable pattern
2482   __ mov(rscratch2, rscratch1);
2483   Label loop;
2484   __ bind(loop);
2485   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2486   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2487   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2488   __ enter();                           // Save old & set new fp
2489   __ sub(sp, sp, r19);                  // Prolog
2490   // This value is corrected by layout_activation_impl
2491   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2492   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2493   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2494   __ sub(r3, r3, 1);                   // Decrement counter
2495   __ cbnz(r3, loop);
2496 
2497     // Re-push self-frame
2498   __ ldr(lr, Address(r2));
2499   __ enter();
2500 
2501   // Allocate a full sized register save area.  We subtract 2 because
2502   // enter() just pushed 2 words
2503   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2504 
2505   // Restore frame locals after moving the frame
2506   __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2507   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2508 
2509   // Call C code.  Need thread but NOT official VM entry
2510   // crud.  We cannot block on this call, no GC can happen.  Call should
2511   // restore return values to their stack-slots with the new SP.
2512   //
2513   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2514 
2515   // Use rfp because the frames look interpreted now
2516   // Don't need the precise return PC here, just precise enough to point into this code blob.
2517   address the_pc = __ pc();
2518   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2519 
2520   __ mov(c_rarg0, rthread);
2521   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2522   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2523   __ blrt(rscratch1, 2, 0, 0);
2524 
2525   // Set an oopmap for the call site
2526   // Use the same PC we used for the last java frame
2527   oop_maps->add_gc_map(the_pc - start,
2528                        new OopMap( frame_size_in_words, 0 ));
2529 
2530   // Clear fp AND pc
2531   __ reset_last_Java_frame(true, true);
2532 
2533   // Collect return values
2534   __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2535   __ ldr(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2536   // I think this is useless (throwing pc?)
2537   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2538 
2539   // Pop self-frame.
2540   __ leave();                           // Epilog
2541 
2542   // Jump to interpreter
2543   __ ret(lr);
2544 
2545   // Make sure all code is generated
2546   masm->flush();
2547 
2548   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2549   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2550 
2551 #ifdef BUILTIN_SIM
2552   if (NotifySimulator) {
2553     unsigned char *base = _deopt_blob->code_begin();
2554     simulator->notifyRelocate(start, base - start);
2555   }
2556 #endif
2557 }
2558 
2559 uint SharedRuntime::out_preserve_stack_slots() {
2560   return 0;
2561 }
2562 
2563 #ifdef COMPILER2
2564 //------------------------------generate_uncommon_trap_blob--------------------
2565 void SharedRuntime::generate_uncommon_trap_blob() {
2566   // Allocate space for the code
2567   ResourceMark rm;
2568   // Setup code generation tools
2569   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2570   MacroAssembler* masm = new MacroAssembler(&buffer);
2571 
2572 #ifdef BUILTIN_SIM
2573   AArch64Simulator *simulator;
2574   if (NotifySimulator) {
2575     simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
2576     simulator->notifyCompile(const_cast<char*>("SharedRuntime:uncommon_trap_blob"), __ pc());
2577   }
2578 #endif
2579 
2580   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2581 
2582   address start = __ pc();
2583 
2584   // Push self-frame.  We get here with a return address in LR
2585   // and sp should be 16 byte aligned
2586   // push rfp and retaddr by hand
2587   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2588   // we don't expect an arg reg save area
2589 #ifndef PRODUCT
2590   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2591 #endif
2592   // compiler left unloaded_class_index in j_rarg0 move to where the
2593   // runtime expects it.
2594   if (c_rarg1 != j_rarg0) {
2595     __ movw(c_rarg1, j_rarg0);
2596   }
2597 
2598   // we need to set the past SP to the stack pointer of the stub frame
2599   // and the pc to the address where this runtime call will return
2600   // although actually any pc in this code blob will do).
2601   Label retaddr;
2602   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2603 
2604   // Call C code.  Need thread but NOT official VM entry
2605   // crud.  We cannot block on this call, no GC can happen.  Call should
2606   // capture callee-saved registers as well as return values.
2607   // Thread is in rdi already.
2608   //
2609   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2610   //
2611   // n.b. 2 gp args, 0 fp args, integral return type
2612 
2613   __ mov(c_rarg0, rthread);
2614   __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2615   __ lea(rscratch1,
2616          RuntimeAddress(CAST_FROM_FN_PTR(address,
2617                                          Deoptimization::uncommon_trap)));
2618   __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
2619   __ bind(retaddr);
2620 
2621   // Set an oopmap for the call site
2622   OopMapSet* oop_maps = new OopMapSet();
2623   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2624 
2625   // location of rfp is known implicitly by the frame sender code
2626 
2627   oop_maps->add_gc_map(__ pc() - start, map);
2628 
2629   __ reset_last_Java_frame(false, true);
2630 
2631   // move UnrollBlock* into r4
2632   __ mov(r4, r0);
2633 
2634 #ifdef ASSERT
2635   { Label L;
2636     __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2637     __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2638     __ br(Assembler::EQ, L);
2639     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2640     __ bind(L);
2641   }
2642 #endif
2643 
2644   // Pop all the frames we must move/replace.
2645   //
2646   // Frame picture (youngest to oldest)
2647   // 1: self-frame (no frame link)
2648   // 2: deopting frame  (no frame link)
2649   // 3: caller of deopting frame (could be compiled/interpreted).
2650 
2651   // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2652   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2653 
2654   // Pop deoptimized frame (int)
2655   __ ldrw(r2, Address(r4,
2656                       Deoptimization::UnrollBlock::
2657                       size_of_deoptimized_frame_offset_in_bytes()));
2658   __ sub(r2, r2, 2 * wordSize);
2659   __ add(sp, sp, r2);
2660   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2661   // LR should now be the return address to the caller (3) frame
2662 
2663 #ifdef ASSERT
2664   // Compilers generate code that bang the stack by as much as the
2665   // interpreter would need. So this stack banging should never
2666   // trigger a fault. Verify that it does not on non product builds.
2667   if (UseStackBanging) {
2668     __ ldrw(r1, Address(r4,
2669                         Deoptimization::UnrollBlock::
2670                         total_frame_sizes_offset_in_bytes()));
2671     __ bang_stack_size(r1, r2);
2672   }
2673 #endif
2674 
2675   // Load address of array of frame pcs into r2 (address*)
2676   __ ldr(r2, Address(r4,
2677                      Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2678 
2679   // Load address of array of frame sizes into r5 (intptr_t*)
2680   __ ldr(r5, Address(r4,
2681                      Deoptimization::UnrollBlock::
2682                      frame_sizes_offset_in_bytes()));
2683 
2684   // Counter
2685   __ ldrw(r3, Address(r4,
2686                       Deoptimization::UnrollBlock::
2687                       number_of_frames_offset_in_bytes())); // (int)
2688 
2689   // Now adjust the caller's stack to make up for the extra locals but
2690   // record the original sp so that we can save it in the skeletal
2691   // interpreter frame and the stack walking of interpreter_sender
2692   // will get the unextended sp value and not the "real" sp value.
2693 
2694   const Register sender_sp = r8;
2695 
2696   __ mov(sender_sp, sp);
2697   __ ldrw(r1, Address(r4,
2698                       Deoptimization::UnrollBlock::
2699                       caller_adjustment_offset_in_bytes())); // (int)
2700   __ sub(sp, sp, r1);
2701 
2702   // Push interpreter frames in a loop
2703   Label loop;
2704   __ bind(loop);
2705   __ ldr(r1, Address(r5, 0));       // Load frame size
2706   __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
2707   __ ldr(lr, Address(r2, 0));       // Save return address
2708   __ enter();                       // and old rfp & set new rfp
2709   __ sub(sp, sp, r1);               // Prolog
2710   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2711   // This value is corrected by layout_activation_impl
2712   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2713   __ mov(sender_sp, sp);          // Pass sender_sp to next frame
2714   __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
2715   __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
2716   __ subsw(r3, r3, 1);            // Decrement counter
2717   __ br(Assembler::GT, loop);
2718   __ ldr(lr, Address(r2, 0));     // save final return address
2719   // Re-push self-frame
2720   __ enter();                     // & old rfp & set new rfp
2721 
2722   // Use rfp because the frames look interpreted now
2723   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2724   // Don't need the precise return PC here, just precise enough to point into this code blob.
2725   address the_pc = __ pc();
2726   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2727 
2728   // Call C code.  Need thread but NOT official VM entry
2729   // crud.  We cannot block on this call, no GC can happen.  Call should
2730   // restore return values to their stack-slots with the new SP.
2731   // Thread is in rdi already.
2732   //
2733   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
2734   //
2735   // n.b. 2 gp args, 0 fp args, integral return type
2736 
2737   // sp should already be aligned
2738   __ mov(c_rarg0, rthread);
2739   __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2740   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2741   __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
2742 
2743   // Set an oopmap for the call site
2744   // Use the same PC we used for the last java frame
2745   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2746 
2747   // Clear fp AND pc
2748   __ reset_last_Java_frame(true, true);
2749 
2750   // Pop self-frame.
2751   __ leave();                 // Epilog
2752 
2753   // Jump to interpreter
2754   __ ret(lr);
2755 
2756   // Make sure all code is generated
2757   masm->flush();
2758 
2759   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
2760                                                  SimpleRuntimeFrame::framesize >> 1);
2761 
2762 #ifdef BUILTIN_SIM
2763   if (NotifySimulator) {
2764     unsigned char *base = _deopt_blob->code_begin();
2765     simulator->notifyRelocate(start, base - start);
2766   }
2767 #endif
2768 }
2769 #endif // COMPILER2
2770 
2771 
2772 //------------------------------generate_handler_blob------
2773 //
2774 // Generate a special Compile2Runtime blob that saves all registers,
2775 // and setup oopmap.
2776 //
2777 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2778   ResourceMark rm;
2779   OopMapSet *oop_maps = new OopMapSet();
2780   OopMap* map;
2781 
2782   // Allocate space for the code.  Setup code generation tools.
2783   CodeBuffer buffer("handler_blob", 2048, 1024);
2784   MacroAssembler* masm = new MacroAssembler(&buffer);
2785 
2786   address start   = __ pc();
2787   address call_pc = NULL;
2788   int frame_size_in_words;
2789   bool cause_return = (poll_type == POLL_AT_RETURN);
2790   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2791 
2792   // Save registers, fpu state, and flags
2793   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
2794 
2795   // The following is basically a call_VM.  However, we need the precise
2796   // address of the call in order to generate an oopmap. Hence, we do all the
2797   // work outselves.
2798 
2799   Label retaddr;
2800   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2801 
2802   // The return address must always be correct so that frame constructor never
2803   // sees an invalid pc.
2804 
2805   if (!cause_return) {
2806     // overwrite the return address pushed by save_live_registers
2807     __ ldr(c_rarg0, Address(rthread, JavaThread::saved_exception_pc_offset()));
2808     __ str(c_rarg0, Address(rfp, wordSize));
2809   }
2810 
2811   // Do the call
2812   __ mov(c_rarg0, rthread);
2813   __ lea(rscratch1, RuntimeAddress(call_ptr));
2814   __ blrt(rscratch1, 1, 0, 1);
2815   __ bind(retaddr);
2816 
2817   // Set an oopmap for the call site.  This oopmap will map all
2818   // oop-registers and debug-info registers as callee-saved.  This
2819   // will allow deoptimization at this safepoint to find all possible
2820   // debug-info recordings, as well as let GC find all oops.
2821 
2822   oop_maps->add_gc_map( __ pc() - start, map);
2823 
2824   Label noException;
2825 
2826   __ reset_last_Java_frame(false, true);
2827 
2828   __ maybe_isb();
2829   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2830 
2831   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2832   __ cbz(rscratch1, noException);
2833 
2834   // Exception pending
2835 
2836   RegisterSaver::restore_live_registers(masm);
2837 
2838   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2839 
2840   // No exception case
2841   __ bind(noException);
2842 
2843   // Normal exit, restore registers and exit.
2844   RegisterSaver::restore_live_registers(masm, save_vectors);
2845 
2846   __ ret(lr);
2847 
2848   // Make sure all code is generated
2849   masm->flush();
2850 
2851   // Fill-out other meta info
2852   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2853 }
2854 
2855 //
2856 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
2857 //
2858 // Generate a stub that calls into vm to find out the proper destination
2859 // of a java call. All the argument registers are live at this point
2860 // but since this is generic code we don't know what they are and the caller
2861 // must do any gc of the args.
2862 //
2863 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2864   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2865 
2866   // allocate space for the code
2867   ResourceMark rm;
2868 
2869   CodeBuffer buffer(name, 1000, 512);
2870   MacroAssembler* masm                = new MacroAssembler(&buffer);
2871 
2872   int frame_size_in_words;
2873 
2874   OopMapSet *oop_maps = new OopMapSet();
2875   OopMap* map = NULL;
2876 
2877   int start = __ offset();
2878 
2879   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2880 
2881   int frame_complete = __ offset();
2882 
2883   {
2884     Label retaddr;
2885     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2886 
2887     __ mov(c_rarg0, rthread);
2888     __ lea(rscratch1, RuntimeAddress(destination));
2889 
2890     __ blrt(rscratch1, 1, 0, 1);
2891     __ bind(retaddr);
2892   }
2893 
2894   // Set an oopmap for the call site.
2895   // We need this not only for callee-saved registers, but also for volatile
2896   // registers that the compiler might be keeping live across a safepoint.
2897 
2898   oop_maps->add_gc_map( __ offset() - start, map);
2899 
2900   __ maybe_isb();
2901 
2902   // r0 contains the address we are going to jump to assuming no exception got installed
2903 
2904   // clear last_Java_sp
2905   __ reset_last_Java_frame(false, true);
2906   // check for pending exceptions
2907   Label pending;
2908   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2909   __ cbnz(rscratch1, pending);
2910 
2911   // get the returned Method*
2912   __ get_vm_result_2(rmethod, rthread);
2913   __ str(rmethod, Address(sp, RegisterSaver::reg_offset_in_bytes(rmethod)));
2914 
2915   // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
2916   __ str(r0, Address(sp, RegisterSaver::rscratch1_offset_in_bytes()));
2917   RegisterSaver::restore_live_registers(masm);
2918 
2919   // We are back the the original state on entry and ready to go.
2920 
2921   __ br(rscratch1);
2922 
2923   // Pending exception after the safepoint
2924 
2925   __ bind(pending);
2926 
2927   RegisterSaver::restore_live_registers(masm);
2928 
2929   // exception pending => remove activation and forward to exception handler
2930 
2931   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2932 
2933   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2934   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2935 
2936   // -------------
2937   // make sure all code is generated
2938   masm->flush();
2939 
2940   // return the  blob
2941   // frame_size_words or bytes??
2942   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2943 }
2944 
2945 
2946 #ifdef COMPILER2
2947 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
2948 //
2949 //------------------------------generate_exception_blob---------------------------
2950 // creates exception blob at the end
2951 // Using exception blob, this code is jumped from a compiled method.
2952 // (see emit_exception_handler in x86_64.ad file)
2953 //
2954 // Given an exception pc at a call we call into the runtime for the
2955 // handler in this method. This handler might merely restore state
2956 // (i.e. callee save registers) unwind the frame and jump to the
2957 // exception handler for the nmethod if there is no Java level handler
2958 // for the nmethod.
2959 //
2960 // This code is entered with a jmp.
2961 //
2962 // Arguments:
2963 //   r0: exception oop
2964 //   r3: exception pc
2965 //
2966 // Results:
2967 //   r0: exception oop
2968 //   r3: exception pc in caller or ???
2969 //   destination: exception handler of caller
2970 //
2971 // Note: the exception pc MUST be at a call (precise debug information)
2972 //       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
2973 //
2974 
2975 void OptoRuntime::generate_exception_blob() {
2976   assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
2977   assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
2978   assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
2979 
2980   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2981 
2982   // Allocate space for the code
2983   ResourceMark rm;
2984   // Setup code generation tools
2985   CodeBuffer buffer("exception_blob", 2048, 1024);
2986   MacroAssembler* masm = new MacroAssembler(&buffer);
2987 
2988   // TODO check various assumptions made here
2989   //
2990   // make sure we do so before running this
2991 
2992   address start = __ pc();
2993 
2994   // push rfp and retaddr by hand
2995   // Exception pc is 'return address' for stack walker
2996   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2997   // there are no callee save registers and we don't expect an
2998   // arg reg save area
2999 #ifndef PRODUCT
3000   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
3001 #endif
3002   // Store exception in Thread object. We cannot pass any arguments to the
3003   // handle_exception call, since we do not want to make any assumption
3004   // about the size of the frame where the exception happened in.
3005   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
3006   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
3007 
3008   // This call does all the hard work.  It checks if an exception handler
3009   // exists in the method.
3010   // If so, it returns the handler address.
3011   // If not, it prepares for stack-unwinding, restoring the callee-save
3012   // registers of the frame being removed.
3013   //
3014   // address OptoRuntime::handle_exception_C(JavaThread* thread)
3015   //
3016   // n.b. 1 gp arg, 0 fp args, integral return type
3017 
3018   // the stack should always be aligned
3019   address the_pc = __ pc();
3020   __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
3021   __ mov(c_rarg0, rthread);
3022   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3023   __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
3024   __ maybe_isb();
3025 
3026   // Set an oopmap for the call site.  This oopmap will only be used if we
3027   // are unwinding the stack.  Hence, all locations will be dead.
3028   // Callee-saved registers will be the same as the frame above (i.e.,
3029   // handle_exception_stub), since they were restored when we got the
3030   // exception.
3031 
3032   OopMapSet* oop_maps = new OopMapSet();
3033 
3034   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3035 
3036   __ reset_last_Java_frame(false, true);
3037 
3038   // Restore callee-saved registers
3039 
3040   // rfp is an implicitly saved callee saved register (i.e. the calling
3041   // convention will save restore it in prolog/epilog) Other than that
3042   // there are no callee save registers now that adapter frames are gone.
3043   // and we dont' expect an arg reg save area
3044   __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
3045 
3046   // r0: exception handler
3047 
3048   // We have a handler in r0 (could be deopt blob).
3049   __ mov(r8, r0);
3050 
3051   // Get the exception oop
3052   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
3053   // Get the exception pc in case we are deoptimized
3054   __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
3055 #ifdef ASSERT
3056   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3057   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3058 #endif
3059   // Clear the exception oop so GC no longer processes it as a root.
3060   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3061 
3062   // r0: exception oop
3063   // r8:  exception handler
3064   // r4: exception pc
3065   // Jump to handler
3066 
3067   __ br(r8);
3068 
3069   // Make sure all code is generated
3070   masm->flush();
3071 
3072   // Set exception blob
3073   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3074 }
3075 #endif // COMPILER2