/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

class RegisterSaver {
public:

  // Special registers:
  //              32-bit ARM     64-bit ARM
  //  Rthread:       R10            R28
  //  LR:            R14            R30

  // Rthread is callee saved in the C ABI and never changed by compiled code:
  // no need to save it.

  // 2 slots for LR: the one at LR_offset and another one at R14/R30_offset.
  // The one at LR_offset is a return address that is needed by stack walking.
  // A c2 method uses LR as a standard register so it may be live when we
  // branch to the runtime. The slot at R14/R30_offset is for the value of LR
  // in case it's live in the method we are coming from.


  enum RegisterLayout {
    fpu_save_size = FloatRegisterImpl::number_of_registers,
#ifndef __SOFTFP__
    D0_offset = 0,
#endif
    R0_offset = fpu_save_size,
    R1_offset,
    R2_offset,
    R3_offset,
    R4_offset,
    R5_offset,
    R6_offset,
#if (FP_REG_NUM != 7)
    // if not saved as FP
    R7_offset,
#endif
    R8_offset,
    R9_offset,
#if (FP_REG_NUM != 11)
    // if not saved as FP
    R11_offset,
#endif
    R12_offset,
    R14_offset,
    FP_offset,
    LR_offset,
    reg_save_size,

    Rmethod_offset = R9_offset,
    Rtemp_offset = R12_offset,
  };
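
  // Illustrative picture of the resulting save area (word offsets from SP
  // after save_live_registers; LR_offset is the highest slot):
  //   LR_offset      return address used by stack walking
  //   FP_offset      saved FP
  //   R14_offset     LR value possibly live in the method we came from
  //   R12 .. R0      base registers (R7/R11 skipped when used as FP)
  //   D0_offset = 0  fpu_save_size words of FP register state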

  // all regs but Rthread (R10), FP (R7 or R11), SP and PC
  // (altFP_7_11 is the one among R7 and R11 which is not FP)
#define SAVED_BASE_REGS (RegisterSet(R0, R6) | RegisterSet(R8, R9) | RegisterSet(R12) | R14 | altFP_7_11)


  // When LR may be live in the nmethod we are coming from, lr_saved is
  // true: the caller has already saved the return address before the call
  // to save_live_registers, and LR still contains the live value.

  static OopMap* save_live_registers(MacroAssembler* masm,
                                     int* total_frame_words,
                                     bool lr_saved = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_lr = true);

};


OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm,
                                           int* total_frame_words,
                                           bool lr_saved) {
  *total_frame_words = reg_save_size;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * (*total_frame_words), 0);

  if (lr_saved) {
    __ push(RegisterSet(FP));
  } else {
    __ push(RegisterSet(FP) | RegisterSet(LR));
  }
  __ push(SAVED_BASE_REGS);
  if (HaveVFP) {
    if (VM_Version::has_vfp3_32()) {
      __ fstmdbd(SP, FloatRegisterSet(D16, 16), writeback);
    } else {
      if (FloatRegisterImpl::number_of_registers > 32) {
        assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
        __ sub(SP, SP, 32 * wordSize);
      }
    }
    __ fstmdbd(SP, FloatRegisterSet(D0, 16), writeback);
  } else {
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  int i;
  int j = 0;
  for (i = R0_offset; i <= R9_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, managed below.
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not managed as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(R12_offset), R12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(R14_offset), R14->as_VMReg());
  if (HaveVFP) {
    for (i = 0; i < (VM_Version::has_vfp3_32() ? 64 : 32); i += 2) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
      map->set_callee_saved(VMRegImpl::stack2reg(i + 1), as_FloatRegister(i)->as_VMReg()->next());
    }
  }

  return map;
}
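
// A minimal usage sketch (mirroring generate_deopt_blob further below):
// save the live registers, attach the returned OopMap to the runtime call,
// then restore:
//
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_in_words);
//   ... set_last_Java_frame(), call the runtime entry, add_gc_map() ...
//   RegisterSaver::restore_live_registers(masm);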

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_lr) {
  if (HaveVFP) {
    __ fldmiad(SP, FloatRegisterSet(D0, 16), writeback);
    if (VM_Version::has_vfp3_32()) {
      __ fldmiad(SP, FloatRegisterSet(D16, 16), writeback);
    } else {
      if (FloatRegisterImpl::number_of_registers > 32) {
        assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
        __ add(SP, SP, 32 * wordSize);
      }
    }
  } else {
    __ add(SP, SP, fpu_save_size * wordSize);
  }
  __ pop(SAVED_BASE_REGS);
  if (restore_lr) {
    __ pop(RegisterSet(FP) | RegisterSet(LR));
  } else {
    __ pop(RegisterSet(FP));
  }
}


static void push_result_registers(MacroAssembler* masm, BasicType ret_type) {
#ifdef __ABI_HARD__
  if (ret_type == T_DOUBLE || ret_type == T_FLOAT) {
    __ sub(SP, SP, 8);
    __ fstd(D0, Address(SP));
    return;
  }
#endif // __ABI_HARD__
  __ raw_push(R0, R1);
}

static void pop_result_registers(MacroAssembler* masm, BasicType ret_type) {
#ifdef __ABI_HARD__
  if (ret_type == T_DOUBLE || ret_type == T_FLOAT) {
    __ fldd(D0, Address(SP));
    __ add(SP, SP, 8);
    return;
  }
#endif // __ABI_HARD__
  __ raw_pop(R0, R1);
}
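
// For reference: on __ABI_HARD__ a float or double result comes back in D0,
// which is what the two helpers above spill and reload; in every other case
// the (possibly 64-bit) result is held in R0:R1, hence the raw_push/raw_pop
// pair.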

static void push_param_registers(MacroAssembler* masm, int fp_regs_in_arguments) {
  // R1-R3 arguments need to be saved, but we push 4 registers for 8-byte alignment
  __ push(RegisterSet(R0, R3));

#ifdef __ABI_HARD__
  // preserve arguments
  // Likely not needed, as the locking code probably won't modify volatile FP
  // registers, but there is no way to guarantee that
  if (fp_regs_in_arguments) {
    // convert fp_regs_in_arguments to a number of double registers
    int double_regs_num = (fp_regs_in_arguments + 1) >> 1;
    __ fstmdbd(SP, FloatRegisterSet(D0, double_regs_num), writeback);
  }
#endif // __ABI_HARD__
}

static void pop_param_registers(MacroAssembler* masm, int fp_regs_in_arguments) {
#ifdef __ABI_HARD__
  if (fp_regs_in_arguments) {
    int double_regs_num = (fp_regs_in_arguments + 1) >> 1;
    __ fldmiad(SP, FloatRegisterSet(D0, double_regs_num), writeback);
  }
#endif // __ABI_HARD__

  __ pop(RegisterSet(R0, R3));
}
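
// Example of the rounding above: fp_regs_in_arguments == 3 gives
// double_regs_num == (3 + 1) >> 1 == 2, so D0..D1 (covering S0..S3) are
// saved even though only three single-precision slots hold arguments.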


// Is the vector's size (in bytes) bigger than the size saved by default?
// All vector registers are saved by default on ARM.
bool SharedRuntime::is_wide_vector(int size) {
  return false;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  InlinedAddress dest(destination);
  __ indirect_jump(dest, Rtemp);
  __ bind_literal(dest);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on arm");

  int slot = 0;
  int ireg = 0;
#ifdef __ABI_HARD__
  int fp_slot = 0;
  int single_fpr_slot = 0;
#endif // __ABI_HARD__
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
    case T_METADATA:
#ifndef __ABI_HARD__
    case T_FLOAT:
#endif // !__ABI_HARD__
      if (ireg < 4) {
        Register r = as_Register(ireg);
        regs[i].set1(r->as_VMReg());
        ireg++;
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot));
        slot++;
      }
      break;
    case T_LONG:
#ifndef __ABI_HARD__
    case T_DOUBLE:
#endif // !__ABI_HARD__
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing half");
      if (ireg <= 2) {
#if (ALIGN_WIDE_ARGUMENTS == 1)
        if (ireg & 1) ireg++;  // Aligned location required
#endif
        Register r1 = as_Register(ireg);
        Register r2 = as_Register(ireg + 1);
        regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
        ireg += 2;
#if (ALIGN_WIDE_ARGUMENTS == 0)
      } else if (ireg == 3) {
        // uses R3 + one stack slot
        Register r = as_Register(ireg);
        regs[i].set_pair(VMRegImpl::stack2reg(slot), r->as_VMReg());
        ireg += 1;
        slot += 1;
#endif
      } else {
        if (slot & 1) slot++; // Aligned location required
        regs[i].set_pair(VMRegImpl::stack2reg(slot+1), VMRegImpl::stack2reg(slot));
        slot += 2;
        ireg = 4;
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      break;
#ifdef __ABI_HARD__
    case T_FLOAT:
      if ((fp_slot < 16) || (single_fpr_slot & 1)) {
        if ((single_fpr_slot & 1) == 0) {
          single_fpr_slot = fp_slot;
          fp_slot += 2;
        }
        FloatRegister r = as_FloatRegister(single_fpr_slot);
        single_fpr_slot++;
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot));
        slot++;
      }
      break;
    case T_DOUBLE:
      assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
      if (fp_slot <= 14) {
        FloatRegister r1 = as_FloatRegister(fp_slot);
        FloatRegister r2 = as_FloatRegister(fp_slot+1);
        regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
        fp_slot += 2;
      } else {
        if (slot & 1) slot++;
        regs[i].set_pair(VMRegImpl::stack2reg(slot+1), VMRegImpl::stack2reg(slot));
        slot += 2;
        single_fpr_slot = 16;
      }
      break;
#endif // __ABI_HARD__
    default:
      ShouldNotReachHere();
    }
  }
  return slot;
}
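
// Illustrative trace of the loop above (hard-float build,
// ALIGN_WIDE_ARGUMENTS == 1) for the signature (int, long, float, double):
//   int    -> R0           (ireg 0 -> 1)
//   long   -> R2:R3        (ireg aligned up to 2, then bumped to 4)
//   float  -> S0           (single_fpr_slot 0, fp_slot advances to 2)
//   double -> S2:S3 == D1  (fp_slot 2 -> 4)
// Everything fits in registers, so the function returns slot == 0.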

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
#ifdef __SOFTFP__
  // soft float is the same as the C calling convention.
  return c_calling_convention(sig_bt, regs, NULL, total_args_passed);
#endif // __SOFTFP__
  (void) is_outgoing;
  int slot = 0;
  int ireg = 0;
  int freg = 0;
  int single_fpr = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if (ireg < 4) {
        Register r = as_Register(ireg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;
    case T_FLOAT:
      // C2 utilizes S14/S15 for mem-mem moves
      if ((freg < 16 COMPILER2_PRESENT(-2)) || (single_fpr & 1)) {
        if ((single_fpr & 1) == 0) {
          single_fpr = freg;
          freg += 2;
        }
        FloatRegister r = as_FloatRegister(single_fpr++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;
    case T_DOUBLE:
      // C2 utilizes S14/S15 for mem-mem moves
      if (freg <= 14 COMPILER2_PRESENT(-2)) {
        FloatRegister r1 = as_FloatRegister(freg);
        FloatRegister r2 = as_FloatRegister(freg + 1);
        regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
        freg += 2;
      } else {
        // Keep internally the aligned calling convention,
        // ignoring ALIGN_WIDE_ARGUMENTS
        if (slot & 1) slot++;
        regs[i].set_pair(VMRegImpl::stack2reg(slot + 1), VMRegImpl::stack2reg(slot));
        slot += 2;
        single_fpr = 16;
      }
      break;
    case T_LONG:
      // Keep internally the aligned calling convention,
      // ignoring ALIGN_WIDE_ARGUMENTS
      if (ireg <= 2) {
        if (ireg & 1) ireg++;
        Register r1 = as_Register(ireg);
        Register r2 = as_Register(ireg + 1);
        regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
        ireg += 2;
      } else {
        if (slot & 1) slot++;
        regs[i].set_pair(VMRegImpl::stack2reg(slot + 1), VMRegImpl::stack2reg(slot));
        slot += 2;
        ireg = 4;
      }
      break;
    case T_VOID:
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }

  if (slot & 1) slot++;
  return slot;
}
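
// Note the COMPILER2_PRESENT(-2) adjustments above: with C2 present, S14/S15
// are kept free for mem-mem moves, so at most 14 float arguments pack into
// S0..S13 and a 15th single-precision value spills to a stack slot.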

static void patch_callers_callsite(MacroAssembler *masm) {
  Label skip;

  __ ldr(Rtemp, Address(Rmethod, Method::code_offset()));
  __ cbz(Rtemp, skip);

  // Pushing an even number of registers for stack alignment.
  // Selecting R9, which had to be saved anyway for some platforms.
  __ push(RegisterSet(R0, R3) | R9 | LR);

  __ mov(R0, Rmethod);
  __ mov(R1, LR);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  __ pop(RegisterSet(R0, R3) | R9 | LR);

  __ bind(skip);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed, int comp_args_on_stack,
                                    const BasicType *sig_bt, const VMRegPair *regs) {
  // TODO: ARM - Maybe we can use ldm to load the arguments
  const Register tmp = Rtemp; // avoid erasing R5_mh

  // The next assert may not be needed, but it is safer. Extra analysis would
  // be required if there were not enough free registers and we needed to use
  // R5 here.
  assert_different_registers(tmp, R5_mh);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
  Address callee_target_addr(Rthread, JavaThread::callee_target_offset());
  __ str(Rmethod, callee_target_addr);


  assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, Rmethod);

  const Register initial_sp = Rmethod; // temporarily scratched

  // Old code was modifying R4 but this looks unsafe (particularly with JSR292)
  assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, initial_sp);

  __ mov(initial_sp, SP);

  if (comp_args_on_stack) {
    __ sub_slow(SP, SP, comp_args_on_stack * VMRegImpl::stack_slot_size);
  }
  __ bic(SP, SP, StackAlignmentInBytes - 1);

  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "must be ordered");
    int arg_offset = Interpreter::expr_offset_in_bytes(total_args_passed - 1 - i);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_stack()) {
      int stack_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ ldr(tmp, Address(initial_sp, arg_offset));
        __ str(tmp, Address(SP, stack_offset));
      } else {
        __ ldr(tmp, Address(initial_sp, arg_offset - Interpreter::stackElementSize));
        __ str(tmp, Address(SP, stack_offset));
        __ ldr(tmp, Address(initial_sp, arg_offset));
        __ str(tmp, Address(SP, stack_offset + wordSize));
      }
    } else if (r_1->is_Register()) {
      if (!r_2->is_valid()) {
        __ ldr(r_1->as_Register(), Address(initial_sp, arg_offset));
      } else {
        __ ldr(r_1->as_Register(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
        __ ldr(r_2->as_Register(), Address(initial_sp, arg_offset));
      }
    } else if (r_1->is_FloatRegister()) {
#ifdef __SOFTFP__
      ShouldNotReachHere();
#endif // __SOFTFP__
      if (!r_2->is_valid()) {
        __ flds(r_1->as_FloatRegister(), Address(initial_sp, arg_offset));
      } else {
        __ fldd(r_1->as_FloatRegister(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
      }
    } else {
      assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
    }
  }

  // restore Rmethod (scratched for initial_sp)
  __ ldr(Rmethod, callee_target_addr);
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));

}
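
// In short, the adapter above reads each incoming argument from the
// interpreter's expression stack (addressed through initial_sp) and
// re-materializes it in the compiled-code convention computed by
// java_calling_convention: registers where possible, otherwise outgoing
// stack slots carved out below the aligned SP.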

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed, int comp_args_on_stack,
                            const BasicType *sig_bt, const VMRegPair *regs,
                            Label& skip_fixup) {
  // TODO: ARM - Maybe we can use stm to deoptimize the arguments
  const Register tmp = Rtemp;

  patch_callers_callsite(masm);
  __ bind(skip_fixup);

  __ mov(Rsender_sp, SP); // not yet saved


  int extraspace = total_args_passed * Interpreter::stackElementSize;
  if (extraspace) {
    __ sub_slow(SP, SP, extraspace);
  }

  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }
    int stack_offset = (total_args_passed - 1 - i) * Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_stack()) {
      int arg_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        __ ldr(tmp, Address(SP, arg_offset));
        __ str(tmp, Address(SP, stack_offset));
      } else {
        __ ldr(tmp, Address(SP, arg_offset));
        __ str(tmp, Address(SP, stack_offset - Interpreter::stackElementSize));
        __ ldr(tmp, Address(SP, arg_offset + wordSize));
        __ str(tmp, Address(SP, stack_offset));
      }
    } else if (r_1->is_Register()) {
      if (!r_2->is_valid()) {
        __ str(r_1->as_Register(), Address(SP, stack_offset));
      } else {
        __ str(r_1->as_Register(), Address(SP, stack_offset - Interpreter::stackElementSize));
        __ str(r_2->as_Register(), Address(SP, stack_offset));
      }
    } else if (r_1->is_FloatRegister()) {
#ifdef __SOFTFP__
      ShouldNotReachHere();
#endif // __SOFTFP__
      if (!r_2->is_valid()) {
        __ fsts(r_1->as_FloatRegister(), Address(SP, stack_offset));
      } else {
        __ fstd(r_1->as_FloatRegister(), Address(SP, stack_offset - Interpreter::stackElementSize));
      }
    } else {
      assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
    }
  }

  __ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()));

}

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  const Register receiver       = R0;
  const Register holder_klass   = Rtemp; // XXX should be OK for C2 but not 100% sure
  const Register receiver_klass = R4;

  __ load_klass(receiver_klass, receiver);
  __ ldr(holder_klass, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
  __ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
  __ cmp(receiver_klass, holder_klass);

  __ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
  __ cmp(Rtemp, 0, eq);
  __ b(skip_fixup, eq);
  __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);

  address c2i_entry = __ pc();
  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}


static int reg2offset_in(VMReg r) {
  // Account for saved FP and LR
  return r->reg2stack() * VMRegImpl::stack_slot_size + 2*wordSize;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
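
// Worked example: with 4-byte stack slots and a 4-byte wordSize, an incoming
// argument in slot 2 maps to FP + 2*4 + 2*wordSize == FP + 16, i.e. just
// above the saved FP and LR pair of the current frame.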


static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = Rmethod;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = Rmethod;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      assert(false, "receiver always in a register");
      receiver_reg = j_rarg0;  // known to be free at this point
      __ ldr(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  // Arguments for JNI method include JNIEnv and Class if static

  // Usage of Rtemp should be OK since it is scratched by the native call

  bool is_static = method->is_static();

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args + 1;
  if (is_static) {
    total_c_args++;
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (is_static) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  int i;
  for (i = 0; i < total_in_args; i++) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }

  int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
  // Since object arguments need to be wrapped, we must preserve space
  // for those object arguments which come in registers (GPR_PARAMS maximum)
  // plus one more slot for the Klass handle (for static methods)
  int oop_handle_offset = stack_slots;
  stack_slots += (GPR_PARAMS + 1) * VMRegImpl::slots_per_word;

  // Plus a lock if needed
  int lock_slot_offset = 0;
  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    assert(sizeof(BasicLock) == wordSize, "adjust this code");
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Space to save return address and FP
  stack_slots += 2 * VMRegImpl::slots_per_word;

  // Calculate the final stack size taking account of alignment
  stack_slots = align_up(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
  int lock_slot_fp_offset = stack_size - 2 * wordSize -
    lock_slot_offset * VMRegImpl::stack_slot_size;
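
  // Example of the arithmetic above with typical 32-bit values
  // (stack_slot_size == 4, StackAlignmentInBytes == 8): stack_slots is
  // rounded up to an even count, so stack_size is a multiple of 8, and
  // lock_slot_fp_offset is the distance from FP down to the lock slot
  // within the frame.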

  // Unverified entry point
  address start = __ pc();

  // Inline cache check, same as in C1_MacroAssembler::inline_cache_check()
  const Register receiver = R0; // see receiverOpr()
  __ load_klass(Rtemp, receiver);
  __ cmp(Rtemp, Ricklass);
  Label verified;

  __ b(verified, eq); // jump over alignment no-ops too
  __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
  __ align(CodeEntryAlignment);

  // Verified entry point
  __ bind(verified);
  int vep_offset = __ pc() - start;


  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
    // Object.hashCode, System.identityHashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    Label slow_case;
    const Register obj_reg = R0;

    // Unlike Object.hashCode, System.identityHashCode is a static method and
    // gets the object as an argument instead of the receiver.
    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
      assert(method->is_static(), "method should be static");
      // return 0 for null reference input, return val = R0 = obj_reg = 0
      __ cmp(obj_reg, 0);
      __ bx(LR, eq);
    }

    __ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    assert(markOopDesc::unlocked_value == 1, "adjust this code");
    __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);

    if (UseBiasedLocking) {
      assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
      __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
    }

    __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
    __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
    __ bx(LR, ne);

    __ bind(slow_case);
  }

  // Bang stack pages
  __ arm_stack_overflow_check(stack_size, Rtemp);

  // Setup frame linkage
  __ raw_push(FP, LR);
  __ mov(FP, SP);
  __ sub_slow(SP, SP, stack_size - 2*wordSize);

  int frame_complete = __ pc() - start;

  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  const int extra_args = is_static ? 2 : 1;
  int receiver_offset = -1;
  int fp_regs_in_arguments = 0;

  for (i = total_in_args; --i >= 0; ) {
    switch (in_sig_bt[i]) {
    case T_ARRAY:
    case T_OBJECT: {
      VMReg src = in_regs[i].first();
      VMReg dst = out_regs[i + extra_args].first();
      if (src->is_stack()) {
        assert(dst->is_stack(), "must be");
        assert(i != 0, "Incoming receiver is always in a register");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
        __ cmp(Rtemp, 0);
        __ add(Rtemp, FP, reg2offset_in(src), ne);
        __ str(Rtemp, Address(SP, reg2offset_out(dst)));
        int offset_in_older_frame = src->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      } else {
        int offset = oop_handle_offset * VMRegImpl::stack_slot_size;
        __ str(src->as_Register(), Address(SP, offset));
        map->set_oop(VMRegImpl::stack2reg(oop_handle_offset));
        if ((i == 0) && (!is_static)) {
          receiver_offset = offset;
        }
        oop_handle_offset += VMRegImpl::slots_per_word;

        if (dst->is_stack()) {
          __ movs(Rtemp, src->as_Register());
          __ add(Rtemp, SP, offset, ne);
          __ str(Rtemp, Address(SP, reg2offset_out(dst)));
        } else {
          __ movs(dst->as_Register(), src->as_Register());
          __ add(dst->as_Register(), SP, offset, ne);
        }
      }
      break;
    }

    case T_VOID:
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif
    case T_LONG: {
      VMReg src_1 = in_regs[i].first();
      VMReg src_2 = in_regs[i].second();
      VMReg dst_1 = out_regs[i + extra_args].first();
      VMReg dst_2 = out_regs[i + extra_args].second();
#if (ALIGN_WIDE_ARGUMENTS == 0)
      // C convention can mix a register and a stack slot for a
      // 64-bits native argument.

      // Note: following code should work independently of whether
      // the Java calling convention follows C convention or whether
      // it aligns 64-bit values.
      if (dst_2->is_Register()) {
        if (src_1->as_Register() != dst_1->as_Register()) {
          assert(src_1->as_Register() != dst_2->as_Register() &&
                 src_2->as_Register() != dst_2->as_Register(), "must be");
          __ mov(dst_2->as_Register(), src_2->as_Register());
          __ mov(dst_1->as_Register(), src_1->as_Register());
        } else {
          assert(src_2->as_Register() == dst_2->as_Register(), "must be");
        }
      } else if (src_2->is_Register()) {
        if (dst_1->is_Register()) {
          // dst mixes a register and a stack slot
          assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
          assert(src_1->as_Register() != dst_1->as_Register(), "must be");
          __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
          __ mov(dst_1->as_Register(), src_1->as_Register());
        } else {
          // registers to stack slots
          assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
          __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
          __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
        }
      } else if (src_1->is_Register()) {
        if (dst_1->is_Register()) {
          // src and dst must be R3 + stack slot
          assert(dst_1->as_Register() == src_1->as_Register(), "must be");
          __ ldr(Rtemp,    Address(FP, reg2offset_in(src_2)));
          __ str(Rtemp,    Address(SP, reg2offset_out(dst_2)));
        } else {
          // <R3,stack> -> <stack,stack>
          assert(dst_2->is_stack() && src_2->is_stack(), "must be");
          __ ldr(LR, Address(FP, reg2offset_in(src_2)));
          __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
          __ str(LR, Address(SP, reg2offset_out(dst_2)));
        }
      } else {
        assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
        __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
        __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
        __ str(LR,    Address(SP, reg2offset_out(dst_2)));
      }
#else // ALIGN_WIDE_ARGUMENTS
      if (src_1->is_stack()) {
        assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
        __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
        __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
        __ str(LR,    Address(SP, reg2offset_out(dst_2)));
      } else if (dst_1->is_stack()) {
        assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
        __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
        __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
      } else if (src_1->as_Register() == dst_1->as_Register()) {
        assert(src_2->as_Register() == dst_2->as_Register(), "must be");
      } else {
        assert(src_1->as_Register() != dst_2->as_Register() &&
               src_2->as_Register() != dst_2->as_Register(), "must be");
        __ mov(dst_2->as_Register(), src_2->as_Register());
        __ mov(dst_1->as_Register(), src_1->as_Register());
      }
#endif // ALIGN_WIDE_ARGUMENTS
      break;
    }

#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
    case T_FLOAT: {
      VMReg src = in_regs[i].first();
      VMReg dst = out_regs[i + extra_args].first();
      if (src->is_stack()) {
        assert(dst->is_stack(), "must be");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
        __ str(Rtemp, Address(SP, reg2offset_out(dst)));
      } else if (dst->is_stack()) {
        __ fsts(src->as_FloatRegister(), Address(SP, reg2offset_out(dst)));
      } else {
        assert(src->is_FloatRegister() && dst->is_Register(), "must be");
        __ fmrs(dst->as_Register(), src->as_FloatRegister());
      }
      break;
    }

    case T_DOUBLE: {
      VMReg src_1 = in_regs[i].first();
      VMReg src_2 = in_regs[i].second();
      VMReg dst_1 = out_regs[i + extra_args].first();
      VMReg dst_2 = out_regs[i + extra_args].second();
      if (src_1->is_stack()) {
        assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
        __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
        __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
        __ str(LR,    Address(SP, reg2offset_out(dst_2)));
      } else if (dst_1->is_stack()) {
        assert(dst_2->is_stack() && src_1->is_FloatRegister(), "must be");
        __ fstd(src_1->as_FloatRegister(), Address(SP, reg2offset_out(dst_1)));
#if (ALIGN_WIDE_ARGUMENTS == 0)
      } else if (dst_2->is_stack()) {
        assert(! src_2->is_stack(), "must be"); // assuming internal java convention is aligned
        // double register must go into R3 + one stack slot
        __ fmrrd(dst_1->as_Register(), Rtemp, src_1->as_FloatRegister());
        __ str(Rtemp, Address(SP, reg2offset_out(dst_2)));
#endif
      } else {
        assert(src_1->is_FloatRegister() && dst_1->is_Register() && dst_2->is_Register(), "must be");
        __ fmrrd(dst_1->as_Register(), dst_2->as_Register(), src_1->as_FloatRegister());
      }
      break;
    }
#endif // !__SOFTFP__ && !__ABI_HARD__

#ifdef __ABI_HARD__
    case T_FLOAT: {
      VMReg src = in_regs[i].first();
      VMReg dst = out_regs[i + extra_args].first();
      if (src->is_stack()) {
        if (dst->is_stack()) {
          __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
          __ str(Rtemp, Address(SP, reg2offset_out(dst)));
        } else {
          // C2 Java calling convention does not populate S14 and S15, therefore
          // those need to be loaded from stack here
          __ flds(dst->as_FloatRegister(), Address(FP, reg2offset_in(src)));
          fp_regs_in_arguments++;
        }
      } else {
        assert(src->is_FloatRegister(), "must be");
        fp_regs_in_arguments++;
      }
      break;
    }
    case T_DOUBLE: {
      VMReg src_1 = in_regs[i].first();
      VMReg src_2 = in_regs[i].second();
      VMReg dst_1 = out_regs[i + extra_args].first();
      VMReg dst_2 = out_regs[i + extra_args].second();
      if (src_1->is_stack()) {
        if (dst_1->is_stack()) {
          assert(dst_2->is_stack(), "must be");
          __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
          __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
          __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
          __ str(LR,    Address(SP, reg2offset_out(dst_2)));
        } else {
          // C2 Java calling convention does not populate S14 and S15, therefore
          // those need to be loaded from stack here
          __ fldd(dst_1->as_FloatRegister(), Address(FP, reg2offset_in(src_1)));
          fp_regs_in_arguments += 2;
        }
      } else {
        assert(src_1->is_FloatRegister() && src_2->is_FloatRegister(), "must be");
        fp_regs_in_arguments += 2;
      }
      break;
    }
#endif // __ABI_HARD__

    default: {
      assert(in_sig_bt[i] != T_ADDRESS, "found T_ADDRESS in java args");
      VMReg src = in_regs[i].first();
      VMReg dst = out_regs[i + extra_args].first();
      if (src->is_stack()) {
        assert(dst->is_stack(), "must be");
        __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
        __ str(Rtemp, Address(SP, reg2offset_out(dst)));
      } else if (dst->is_stack()) {
        __ str(src->as_Register(), Address(SP, reg2offset_out(dst)));
      } else {
        assert(src->is_Register() && dst->is_Register(), "must be");
        __ mov(dst->as_Register(), src->as_Register());
      }
    }
    }
  }

  // Get Klass mirror
  int klass_offset = -1;
  if (is_static) {
    klass_offset = oop_handle_offset * VMRegImpl::stack_slot_size;
    __ mov_oop(Rtemp, JNIHandles::make_local(method->method_holder()->java_mirror()));
    __ add(c_rarg1, SP, klass_offset);
    __ str(Rtemp, Address(SP, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(oop_handle_offset));
  }

  // the PC offset given to add_gc_map must match the PC saved in set_last_Java_frame
  int pc_offset = __ set_last_Java_frame(SP, FP, true, Rtemp);
  assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
  oop_maps->add_gc_map(pc_offset, map);

  // Order last_Java_pc store with the thread state transition (to _thread_in_native)
  __ membar(MacroAssembler::StoreStore, Rtemp);

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    __ save_caller_save_registers();
    __ mov(R0, Rthread);
    __ mov_metadata(R1, method());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), R0, R1);
    __ restore_caller_save_registers();
  }

  const Register sync_handle = R5;
  const Register sync_obj    = R6;
  const Register disp_hdr    = altFP_7_11;
  const Register tmp         = R8;

  Label slow_lock, slow_lock_biased, lock_done, fast_lock;
  if (method->is_synchronized()) {
    // The first argument is a handle to the sync object (a class or an instance)
    __ ldr(sync_obj, Address(R1));
    // Remember the handle for the unlocking code
    __ mov(sync_handle, R1);

    __ resolve(IS_NOT_NULL, sync_obj);

    if (UseBiasedLocking) {
      __ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
    }

    const Register mark = tmp;
    // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
    // That would be acceptable as either CAS or slow case path is taken in that case

    __ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
    __ sub(disp_hdr, FP, lock_slot_fp_offset);
    __ tst(mark, markOopDesc::unlocked_value);
    __ b(fast_lock, ne);

    // Check for recursive lock
    // See comments in InterpreterMacroAssembler::lock_object for
    // explanations on the fast recursive locking check.
    // Check independently the low bits and the distance to SP
    // -1- test low 2 bits
    __ movs(Rtemp, AsmOperand(mark, lsl, 30));
    // -2- test (hdr - SP) if the low two bits are 0
    __ sub(Rtemp, mark, SP, eq);
    __ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
    // If still 'eq' then recursive locking OK: set displaced header to 0
    __ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()), eq);
    __ b(lock_done, eq);
    __ b(slow_lock);

    __ bind(fast_lock);
    __ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));

    __ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);

    __ bind(lock_done);
  }

  // Get JNIEnv*
  __ add(c_rarg0, Rthread, in_bytes(JavaThread::jni_environment_offset()));

  // Perform thread state transition
  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Finally, call the native method
  __ call(method->native_function());

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do a safepoint check while thread is in transition state
  InlinedAddress safepoint_state(SafepointSynchronize::address_of_state());
  Label call_safepoint_runtime, return_to_java;
  __ mov(Rtemp, _thread_in_native_trans);
  __ ldr_literal(R2, safepoint_state);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // make sure the store is observed before reading the SafepointSynchronize state and further mem refs
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);

  __ ldr_s32(R2, Address(R2));
  __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ cmp(R2, SafepointSynchronize::_not_synchronized);
  __ cond_cmp(R3, 0, eq);
  __ b(call_safepoint_runtime, ne);
  __ bind(return_to_java);

  // Perform thread state transition and reguard stack yellow pages if needed
  Label reguard, reguard_done;
  __ mov(Rtemp, _thread_in_Java);
  __ ldr_s32(R2, Address(Rthread, JavaThread::stack_guard_state_offset()));
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  __ cmp(R2, JavaThread::stack_guard_yellow_reserved_disabled);
  __ b(reguard, eq);
  __ bind(reguard_done);

  Label slow_unlock, unlock_done;
  if (method->is_synchronized()) {
    __ ldr(sync_obj, Address(sync_handle));

    __ resolve(IS_NOT_NULL, sync_obj);

    if (UseBiasedLocking) {
      __ biased_locking_exit(sync_obj, Rtemp, unlock_done);
      // disp_hdr may not have been saved on entry with biased locking
      __ sub(disp_hdr, FP, lock_slot_fp_offset);
    }

    // See C1_MacroAssembler::unlock_object() for more comments
    __ ldr(R2, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
    __ cbz(R2, unlock_done);

    __ cas_for_lock_release(disp_hdr, R2, sync_obj, Rtemp, slow_unlock);

    __ bind(unlock_done);
  }

  // Set last java frame and handle block to zero
  __ ldr(LR, Address(Rthread, JavaThread::active_handles_offset()));
  __ reset_last_Java_frame(Rtemp); // sets Rtemp to 0 on 32-bit ARM

  __ str_32(Rtemp, Address(LR, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result, e.g. JNIHandles::resolve value in R0.
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    __ resolve_jobject(R0,      // value
                       Rtemp,   // tmp1
                       R1_tmp); // tmp2
  }

  // Any exception pending?
  __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
  __ mov(SP, FP);

  __ cmp(Rtemp, 0);
  // Pop the frame and return if no exception is pending
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);
  // Pop the frame and forward the exception. Rexception_pc contains the return address.
  __ ldr(FP, Address(SP, wordSize, post_indexed), ne);
  __ ldr(Rexception_pc, Address(SP, wordSize, post_indexed), ne);
  __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);

  // Safepoint operation and/or pending suspend request is in progress.
  // Save the return values and call the runtime function by hand.
  __ bind(call_safepoint_runtime);
  push_result_registers(masm, ret_type);
  __ mov(R0, Rthread);
  __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
  pop_result_registers(masm, ret_type);
  __ b(return_to_java);

  __ bind_literal(safepoint_state);

  // Reguard stack pages. Save native results around a call to C runtime.
  __ bind(reguard);
  push_result_registers(masm, ret_type);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  pop_result_registers(masm, ret_type);
  __ b(reguard_done);

  if (method->is_synchronized()) {
    // Locking slow case
    if (UseBiasedLocking) {
      __ bind(slow_lock_biased);
      __ sub(disp_hdr, FP, lock_slot_fp_offset);
    }

    __ bind(slow_lock);

    push_param_registers(masm, fp_regs_in_arguments);

    // last_Java_frame is already set, so do call_VM manually; no exception can occur
    __ mov(R0, sync_obj);
    __ mov(R1, disp_hdr);
    __ mov(R2, Rthread);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C));

    pop_param_registers(masm, fp_regs_in_arguments);

    __ b(lock_done);

    // Unlocking slow case
    __ bind(slow_unlock);

    push_result_registers(masm, ret_type);

    // Clear pending exception before reentering VM.
    // Can store the oop in register since it is a leaf call.
    assert_different_registers(Rtmp_save1, sync_obj, disp_hdr);
    __ ldr(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset()));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    __ mov(R0, sync_obj);
    __ mov(R1, disp_hdr);
    __ mov(R2, Rthread);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
    __ str(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset()));

    pop_result_registers(masm, ret_type);

    __ b(unlock_done);
  }

  __ flush();
  return nmethod::new_native_nmethod(method,
                                     compile_id,
                                     masm->code(),
                                     vep_offset,
                                     frame_complete,
                                     stack_slots / VMRegImpl::slots_per_word,
                                     in_ByteSize(is_static ? klass_offset : receiver_offset),
                                     in_ByteSize(lock_slot_offset * VMRegImpl::stack_slot_size),
                                     oop_maps);
}

// This function returns the adjustment size (in number of words) to a c2i
// adapter activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  int extra_locals_size = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  return extra_locals_size;
}
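
// Example: a callee with 2 parameters and 5 locals needs
// (5 - 2) * Interpreter::stackElementWords extra words on the interpreter
// frame that is rebuilt during deoptimization.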


uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}
1374 
1375 
1376 //------------------------------generate_deopt_blob----------------------------
1377 void SharedRuntime::generate_deopt_blob() {
1378   ResourceMark rm;
1379   CodeBuffer buffer("deopt_blob", 1024, 1024);
1380   int frame_size_in_words;
1381   OopMapSet* oop_maps;
1382   int reexecute_offset;
1383   int exception_in_tls_offset;
1384   int exception_offset;
1385 
1386   MacroAssembler* masm = new MacroAssembler(&buffer);
1387   Label cont;
1388   const Register Rkind   = R9; // caller-saved
1389   const Register Rublock = R6;
1390   const Register Rsender = altFP_7_11;
1391   assert_different_registers(Rkind, Rublock, Rsender, Rexception_obj, Rexception_pc, R0, R1, R2, R3, R8, Rtemp);
1392 
1393   address start = __ pc();
1394 
1395   oop_maps = new OopMapSet();
1396   // LR saved by caller (can be live in c2 method)
1397 
1398   // A deopt is a case where LR may be live in the c2 nmethod, so it's
1399   // not possible to call the deopt blob from the nmethod and pass the
1400   // address of the deopt handler of the nmethod in LR. What happens
1401   // instead is that the caller of the deopt blob pushes the current
1402   // address so the deopt blob doesn't have to do it. This way LR is
1403   // preserved: it contains the live value from the nmethod and is
1404   // saved at R14/R30_offset here.
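
  // Conceptual caller-side sequence (a sketch; the actual code is emitted
  // by the compiler's deopt handler and may differ in detail):
  //   1. reserve one word of stack
  //   2. store the deopt re-entry pc into that word, without touching LR
  //   3. branch (not branch-and-link) to this blob
  // save_live_registers() below then finds that word at LR_offset and
  // stores the untouched live LR at R14/R30_offset.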
1405   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_in_words, true);
1406   __ mov(Rkind, Deoptimization::Unpack_deopt);
1407   __ b(cont);
1408 
1409   exception_offset = __ pc() - start;
1410 
1411   // Transfer Rexception_obj & Rexception_pc into TLS and fall through to the
1412   // exception_in_tls_offset entry point.
1413   __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
1414   __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));
1415   // Force return value to NULL to avoid confusing the escape analysis
1416   // logic. Everything is dead here anyway.
1417   __ mov(R0, 0);
1418 
1419   exception_in_tls_offset = __ pc() - start;
1420 
1421   // Exception data is in JavaThread structure
1422   // Patch the return address of the current frame
1423   __ ldr(LR, Address(Rthread, JavaThread::exception_pc_offset()));
1424   (void) RegisterSaver::save_live_registers(masm, &frame_size_in_words);
1425   {
1426     const Register Rzero = __ zero_register(Rtemp); // XXX should be OK for C2 but not 100% sure
1427     __ str(Rzero, Address(Rthread, JavaThread::exception_pc_offset()));
1428   }
1429   __ mov(Rkind, Deoptimization::Unpack_exception);
1430   __ b(cont);
1431 
1432   reexecute_offset = __ pc() - start;
1433 
1434   (void) RegisterSaver::save_live_registers(masm, &frame_size_in_words);
1435   __ mov(Rkind, Deoptimization::Unpack_reexecute);
1436 
1437   // Calculate UnrollBlock and save the result in Rublock
1438   __ bind(cont);
1439   __ mov(R0, Rthread);
1440   __ mov(R1, Rkind);
1441 
1442   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); // note: FP may not need to be saved (not on x86)
1443   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1444   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
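  // A pc_offset of -1 means set_last_Java_frame recorded no pc slot; in
  // that case key the oop map to the return address of the call above.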
1445   if (pc_offset == -1) {
1446     pc_offset = __ offset();
1447   }
1448   oop_maps->add_gc_map(pc_offset, map);
1449   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1450 
1451   __ mov(Rublock, R0);
1452 
1453   // Reload Rkind from the UnrollBlock (might have changed)
1454   __ ldr_s32(Rkind, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
1455   Label noException;
1456   __ cmp_32(Rkind, Deoptimization::Unpack_exception);   // Was exception pending?
1457   __ b(noException, ne);
1458   // handle exception case
1459 #ifdef ASSERT
1460   // assert that exception_pc is zero in tls
1461   { Label L;
1462     __ ldr(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));
1463     __ cbz(Rexception_pc, L);
1464     __ stop("exception pc should be null");
1465     __ bind(L);
1466   }
1467 #endif
1468   __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
1469   __ verify_oop(Rexception_obj);
1470   {
1471     const Register Rzero = __ zero_register(Rtemp);
1472     __ str(Rzero, Address(Rthread, JavaThread::exception_oop_offset()));
1473   }
1474 
1475   __ bind(noException);
1476 
1477   // This frame is going away.  Fetch return value, so we can move it to
1478   // a new frame.
1479   __ ldr(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1480   __ ldr(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1481 #ifndef __SOFTFP__
1482   __ ldr_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1483 #endif
1484   // pop frame
1485   __ add(SP, SP, RegisterSaver::reg_save_size * wordSize);
1486 
1487   // Set initial stack state before pushing interpreter frames
1488   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
1489   __ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
1490   __ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
1491 
1492   __ add(SP, SP, Rtemp);
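
  // SP now points past the deoptimized frame. With illustrative numbers:
  // if size_of_deoptimized_frame was 96 bytes, the add above released those
  // 96 bytes in one step; R2 and R3 then walk parallel arrays of return pcs
  // and frame sizes, one entry per frame pushed below.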
1493 
1494 #ifdef ASSERT
1495   // Compilers generate code that bangs the stack by as much as the
1496   // interpreter would need, so this stack banging should never
1497   // trigger a fault. Verify that it does not on non-product builds.
1498   // See if there is enough stack to push the deoptimized frames.
1499   if (UseStackBanging) {
1500     // The compiled method that we are deoptimizing was popped from the stack.
1501     // If the stack bang results in a stack overflow, we don't return to the
1502     // method that is being deoptimized. The stack overflow exception is
1503     // propagated to the caller of the deoptimized method. Need to get the pc
1504     // from the caller in LR and restore FP.
1505     __ ldr(LR, Address(R2, 0));
1506     __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
1507     __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
1508     __ arm_stack_overflow_check(R8, Rtemp);
1509   }
1510 #endif
1511   __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
1512 
1513   // Pick up the initial fp we should save
1514   // XXX Note: was ldr(FP, Address(FP));
1515 
1516   // The compiler no longer uses FP as a frame pointer for the
1517   // compiled code. It can be used by the allocator in C2 or to
1518   // remember the original SP for JSR292 call sites.
1519 
1520   // Hence, ldr(FP, Address(FP)) would not be correct. As on x86,
1521   // Deoptimization::fetch_unroll_info computes the right FP value and
1522   // stores it in UnrollBlock::initial_info; that mechanism is used here too.
1523   __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
1524 
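  // caller_adjustment is the amount (in bytes) by which the caller's frame
  // must be extended for the callee's extra locals, as computed by
  // last_frame_adjust() above; Rsender remembers the sender SP for the
  // frames pushed below.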
1525   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
1526   __ mov(Rsender, SP);
1527   __ sub(SP, SP, Rtemp);
1528 
1529   // Push interpreter frames in a loop
1530   Label loop;
1531   __ bind(loop);
1532   __ ldr(LR, Address(R2, wordSize, post_indexed));         // load frame pc
1533   __ ldr(Rtemp, Address(R3, wordSize, post_indexed));      // load frame size
1534 
1535   __ raw_push(FP, LR);                                     // create new frame
1536   __ mov(FP, SP);
1537   __ sub(Rtemp, Rtemp, 2*wordSize);
1538 
1539   __ sub(SP, SP, Rtemp);
1540 
1541   __ str(Rsender, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
1542   __ mov(LR, 0);
1543   __ str(LR, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1544 
1545   __ subs(R8, R8, 1);                               // decrement counter
1546   __ mov(Rsender, SP);
1547   __ b(loop, ne);
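
  // The loop above behaves like this C-like sketch (illustrative only):
  //   do {
  //     pc   = *frame_pcs++;                    // R2
  //     size = *frame_sizes++;                  // R3
  //     push(FP, pc);                           // new frame header
  //     FP   = SP;
  //     SP  -= size - 2*wordSize;               // header already pushed
  //     FP[interpreter_frame_sender_sp_offset] = sender_sp;
  //     FP[interpreter_frame_last_sp_offset]   = 0;
  //     sender_sp = SP;
  //   } while (--number_of_frames != 0);        // R8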
1548 
1549   // Re-push self-frame
1550   __ ldr(LR, Address(R2));
1551   __ raw_push(FP, LR);
1552   __ mov(FP, SP);
1553   __ sub(SP, SP, (frame_size_in_words - 2) * wordSize);
1554 
1555   // Restore frame locals after moving the frame
1556   __ str(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1557   __ str(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1558 
1559 #ifndef __SOFTFP__
1560   __ str_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1561 #endif // !__SOFTFP__
1562 
1563 #ifdef ASSERT
1564   // Reload Rkind from the UnrollBlock and check that it was not overwritten (Rkind is not callee-saved)
1565   { Label L;
1566     __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
1567     __ cmp_32(Rkind, Rtemp);
1568     __ b(L, eq);
1569     __ stop("Rkind was overwritten");
1570     __ bind(L);
1571   }
1572 #endif
1573 
1574   // Call unpack_frames with proper arguments
1575   __ mov(R0, Rthread);
1576   __ mov(R1, Rkind);
1577 
1578   pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
1579   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1580   __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
1581   if (pc_offset == -1) {
1582     pc_offset = __ offset();
1583   }
1584   oop_maps->add_gc_map(pc_offset, new OopMap(frame_size_in_words * VMRegImpl::slots_per_word, 0));
1585   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1586 
1587   // Collect return values, pop self-frame and jump to interpreter
1588   __ ldr(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1589   __ ldr(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1590   // Interpreter floats are controlled by __SOFTFP__, but the compiler's
1591   // float return value registers are controlled by __ABI_HARD__.
1592   // This matters for vfp-sflt builds.
1593 #ifndef __SOFTFP__
1594   // Interpreter hard float
1595 #ifdef __ABI_HARD__
1596   // Compiler float return value in FP registers
1597   __ ldr_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1598 #else
1599   // Compiler float return value in integer registers,
1600   // copy to D0 for the interpreter (S0 <-- R0, S1 <-- R1)
1601   __ fmdrr(D0_tos, R0, R1);
1602 #endif
1603 #endif // !__SOFTFP__
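
  // Summary of the three build configurations handled above:
  //   __SOFTFP__                   : no FP registers anywhere; nothing to load
  //   !__SOFTFP__ &&  __ABI_HARD__ : compiled code returned in D0; reload D0
  //   !__SOFTFP__ && !__ABI_HARD__ : vfp-sflt; the double comes back in R0/R1
  //                                  and fmdrr moves it into D0_tos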
1604   __ mov(SP, FP);
1605 
1606   __ pop(RegisterSet(FP) | RegisterSet(PC));
1607 
1608   __ flush();
1609 
1610   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
1611                                            reexecute_offset, frame_size_in_words);
1612   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
1613 }
1614 
1615 #ifdef COMPILER2
1616 
1617 //------------------------------generate_uncommon_trap_blob--------------------
1618 // Ought to generate an ideal graph & compile, but here's some ARM
1619 // assembly instead.
1620 void SharedRuntime::generate_uncommon_trap_blob() {
1621   // allocate space for the code
1622   ResourceMark rm;
1623 
1624   // setup code generation tools
1625   int pad = VerifyThread ? 512 : 0;
1626 #ifdef _LP64
1627   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
1628 #else
1629   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
1630   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
1631   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
1632 #endif
1633   // bypassed when code generation is useless
1634   MacroAssembler* masm               = new MacroAssembler(&buffer);
1635   const Register Rublock = R6;
1636   const Register Rsender = altFP_7_11;
1637   assert_different_registers(Rublock, Rsender, Rexception_obj, R0, R1, R2, R3, R8, Rtemp);
1638 
1639   //
1640   // This is the entry point for all traps the compiler takes when it thinks
1641   // it cannot handle further execution of compiled code. The frame is
1642   // deoptimized in these cases and converted into interpreter frames for
1643   // execution.
1644   // The steps taken by this frame are as follows:
1645   //   - push a fake "unpack_frame"
1646   //   - call the C routine Deoptimization::uncommon_trap (this function
1647   //     packs the current compiled frame into vframe arrays and returns
1648   //     information about the number and size of interpreter frames which
1649   //     are equivalent to the frame which is being deoptimized)
1650   //   - deallocate the "unpack_frame"
1651   //   - deallocate the deoptimization frame
1652   //   - in a loop using the information returned in the previous step
1653   //     push interpreter frames;
1654   //   - create a dummy "unpack_frame"
1655   //   - call the C routine: Deoptimization::unpack_frames (this function
1656   //     lays out values on the interpreter frame which was just created)
1657   //   - deallocate the dummy unpack_frame
1658   //   - return to the interpreter entry point
1659   //
1660   //  Refer to the following methods for more information:
1661   //   - Deoptimization::uncommon_trap
1662   //   - Deoptimization::unpack_frames
1663 
1664   // the unloaded class index is in R0 (first parameter to this blob)
1665 
1666   __ raw_push(FP, LR);
1667   __ set_last_Java_frame(SP, FP, false, Rtemp);
1668   __ mov(R2, Deoptimization::Unpack_uncommon_trap);
1669   __ mov(R1, R0);
1670   __ mov(R0, Rthread);
1671   __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
1672   __ mov(Rublock, R0);
1673   __ reset_last_Java_frame(Rtemp);
1674   __ raw_pop(FP, LR);
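
  // The raw_push/raw_pop pair above brackets the call: it is the fake
  // "unpack_frame" from the step list. Rublock now holds the UnrollBlock
  // returned by Deoptimization::uncommon_trap.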
1675 
1676 #ifdef ASSERT
1677   { Label L;
1678     __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
1679     __ cmp_32(Rtemp, Deoptimization::Unpack_uncommon_trap);
1680     __ b(L, eq);
1681     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
1682     __ bind(L);
1683   }
1684 #endif
1685 
1686 
1687   // Set initial stack state before pushing interpreter frames
1688   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
1689   __ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
1690   __ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
1691 
1692   __ add(SP, SP, Rtemp);
1693 
1694   // See if there is enough stack to push the deoptimized frames.
1695 #ifdef ASSERT
1696   // Compilers generate code that bangs the stack by as much as the
1697   // interpreter would need, so this stack banging should never
1698   // trigger a fault. Verify that it does not on non-product builds.
1699   if (UseStackBanging) {
1700     // The compiled method that we are deoptimizing was popped from the stack.
1701     // If the stack bang results in a stack overflow, we don't return to the
1702     // method that is being deoptimized. The stack overflow exception is
1703     // propagated to the caller of the deoptimized method. Need to get the pc
1704     // from the caller in LR and restore FP.
1705     __ ldr(LR, Address(R2, 0));
1706     __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
1707     __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
1708     __ arm_stack_overflow_check(R8, Rtemp);
1709   }
1710 #endif
1711   __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
1712   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
1713   __ mov(Rsender, SP);
1714   __ sub(SP, SP, Rtemp);
1715   //  __ ldr(FP, Address(FP));
1716   __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
1717 
1718   // Push interpreter frames in a loop
1719   Label loop;
1720   __ bind(loop);
1721   __ ldr(LR, Address(R2, wordSize, post_indexed));         // load frame pc
1722   __ ldr(Rtemp, Address(R3, wordSize, post_indexed));      // load frame size
1723 
1724   __ raw_push(FP, LR);                                     // create new frame
1725   __ mov(FP, SP);
1726   __ sub(Rtemp, Rtemp, 2*wordSize);
1727 
1728   __ sub(SP, SP, Rtemp);
1729 
1730   __ str(Rsender, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
1731   __ mov(LR, 0);
1732   __ str(LR, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1733   __ subs(R8, R8, 1);                               // decrement counter
1734   __ mov(Rsender, SP);
1735   __ b(loop, ne);
1736 
1737   // Re-push self-frame
1738   __ ldr(LR, Address(R2));
1739   __ raw_push(FP, LR);
1740   __ mov(FP, SP);
1741 
1742   // Call unpack_frames with proper arguments
1743   __ mov(R0, Rthread);
1744   __ mov(R1, Deoptimization::Unpack_uncommon_trap);
1745   __ set_last_Java_frame(SP, FP, false, Rtemp);
1746   __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
1747   //  oop_maps->add_gc_map(__ pc() - start, new OopMap(frame_size_in_words, 0));
1748   __ reset_last_Java_frame(Rtemp);
1749 
1750   __ mov(SP, FP);
1751   __ pop(RegisterSet(FP) | RegisterSet(PC));
1752 
1753   masm->flush();
1754   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, 2 /* LR+FP */);
1755 }
1756 
1757 #endif // COMPILER2
1758 
1759 //------------------------------generate_handler_blob------
1760 //
1761 // Generate a special Compile2Runtime blob that saves all registers,
1762 // sets up an oopmap, and calls safepoint code to stop the compiled code for
1763 // a safepoint.
1764 //
1765 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
1766   assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
1767 
1768   ResourceMark rm;
1769   CodeBuffer buffer("handler_blob", 256, 256);
1770   int frame_size_words;
1771   OopMapSet* oop_maps;
1772 
1773   bool cause_return = (poll_type == POLL_AT_RETURN);
1774 
1775   MacroAssembler* masm = new MacroAssembler(&buffer);
1776   address start = __ pc();
1777   oop_maps = new OopMapSet();
1778 
1779   if (!cause_return) {
1780     __ sub(SP, SP, 4); // make room for LR which may still be live
1781                        // here if we are coming from a c2 method
1782   }
1783 
1784   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_words, !cause_return);
1785   if (!cause_return) {
1786     // update the saved PC with the correct value;
1787     // two steps are needed because LR can be live in a c2 method
1788     __ ldr(LR, Address(Rthread, JavaThread::saved_exception_pc_offset()));
1789     __ str(LR, Address(SP, RegisterSaver::LR_offset * wordSize));
1790   }
1791 
1792   __ mov(R0, Rthread);
1793   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); // note: FP may not need to be saved (not on x86)
1794   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1795   __ call(call_ptr);
1796   if (pc_offset == -1) {
1797     pc_offset = __ offset();
1798   }
1799   oop_maps->add_gc_map(pc_offset, map);
1800   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1801 
1802   // Check for pending exception
1803   __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
1804   __ cmp(Rtemp, 0);
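
  // eq <=> no pending exception: the conditional pop/bx below then returns
  // straight to the caller; otherwise fall through and forward the
  // exception with the faulting return address in Rexception_pc.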
1805 
1806   if (!cause_return) {
1807     RegisterSaver::restore_live_registers(masm, false);
1808     __ pop(PC, eq);
1809     __ pop(Rexception_pc);
1810   } else {
1811     RegisterSaver::restore_live_registers(masm);
1812     __ bx(LR, eq);
1813     __ mov(Rexception_pc, LR);
1814   }
1815 
1816   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1817 
1818   __ flush();
1819 
1820   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
1821 }
1822 
1823 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
1824   assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
1825 
1826   ResourceMark rm;
1827   CodeBuffer buffer(name, 1000, 512);
1828   int frame_size_words;
1829   OopMapSet *oop_maps;
1830   int frame_complete;
1831 
1832   MacroAssembler* masm = new MacroAssembler(&buffer);
1833   Label pending_exception;
1834 
1835   int start = __ offset();
1836 
1837   oop_maps = new OopMapSet();
1838   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_words);
1839 
1840   frame_complete = __ offset();
1841 
1842   __ mov(R0, Rthread);
1843 
1844   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
1845   assert(start == 0, "warning: start differs from code_begin");
1846   __ call(destination);
1847   if (pc_offset == -1) {
1848     pc_offset = __ offset();
1849   }
1850   oop_maps->add_gc_map(pc_offset, map);
1851   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1852 
1853   __ ldr(R1, Address(Rthread, Thread::pending_exception_offset()));
1854   __ cbnz(R1, pending_exception);
1855 
1856   // Overwrite saved register values
1857 
1858   // Place metadata result of VM call into Rmethod
1859   __ get_vm_result_2(R1, Rtemp);
1860   __ str(R1, Address(SP, RegisterSaver::Rmethod_offset * wordSize));
1861 
1862   // Place target address (VM call result) into Rtemp
1863   __ str(R0, Address(SP, RegisterSaver::Rtemp_offset * wordSize));
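
  // restore_live_registers() reloads Rmethod and Rtemp from the two patched
  // slots, so the jump(Rtemp) below lands on the resolved entry point with
  // Rmethod set up for the callee.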
1864 
1865   RegisterSaver::restore_live_registers(masm);
1866   __ jump(Rtemp);
1867 
1868   __ bind(pending_exception);
1869 
1870   RegisterSaver::restore_live_registers(masm);
1871   const Register Rzero = __ zero_register(Rtemp);
1872   __ str(Rzero, Address(Rthread, JavaThread::vm_result_2_offset()));
1873   __ mov(Rexception_pc, LR);
1874   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1875 
1876   __ flush();
1877 
1878   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
1879 }