1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "interpreter/interp_masm.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/templateTable.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/cpCache.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 
  42 #define __ _masm->
  43 
  44 //----------------------------------------------------------------------------------------------------
  45 // Platform-dependent initialization
  46 
  47 void TemplateTable::pd_initialize() {
  48   // No ARM-specific initialization
  49 }
  50 
  51 //----------------------------------------------------------------------------------------------------
  52 // Address computation
  53 
  54 // local variables
  55 static inline Address iaddress(int n)            {
  56   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  57 }
  58 
  59 static inline Address laddress(int n)            { return iaddress(n + 1); }
  60 #ifndef AARCH64
  61 static inline Address haddress(int n)            { return iaddress(n + 0); }
  62 #endif // !AARCH64
  63 
  64 static inline Address faddress(int n)            { return iaddress(n); }
  65 static inline Address daddress(int n)            { return laddress(n); }
  66 static inline Address aaddress(int n)            { return iaddress(n); }
  67 
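     // Locals layout sketch (inferred from the helpers above and from
     // get_local_base_addr() below, which subtracts the scaled index from Rlocals):
     // local slot n lives at Rlocals + Interpreter::local_offset_in_bytes(n),
     // i.e. locals occupy decreasing addresses starting at local 0.  A category-2
     // value stored in slots n and n+1 keeps its low word in slot n+1 (the lower
     // address), which is why laddress(n) == iaddress(n + 1) and, on 32-bit ARM,
     // haddress(n) == iaddress(n + 0).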
  68 
  69 void TemplateTable::get_local_base_addr(Register r, Register index) {
  70   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  71 }
  72 
  73 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  74 #ifdef AARCH64
  75   get_local_base_addr(scratch, index);
  76   return Address(scratch);
  77 #else
  78   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  79 #endif // AARCH64
  80 }
  81 
  82 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  83   return load_iaddress(index, scratch);
  84 }
  85 
  86 Address TemplateTable::load_faddress(Register index, Register scratch) {
  87 #ifdef __SOFTFP__
  88   return load_iaddress(index, scratch);
  89 #else
  90   get_local_base_addr(scratch, index);
  91   return Address(scratch);
  92 #endif // __SOFTFP__
  93 }
  94 
  95 Address TemplateTable::load_daddress(Register index, Register scratch) {
  96   get_local_base_addr(scratch, index);
  97   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  98 }
  99 
 100 // At the top of the Java expression stack, which may be different from SP.
 101 // It isn't for category 1 objects.
 102 static inline Address at_tos() {
 103   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 104 }
 105 
 106 static inline Address at_tos_p1() {
 107   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 108 }
 109 
 110 static inline Address at_tos_p2() {
 111   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 112 }
 113 
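     // Expression stack sketch: Rstack_top points at the topmost value and the
     // stack grows toward lower addresses, so deeper elements sit at increasing
     // positive offsets from Rstack_top.  For example, aastore() below reads
     // value / index / arrayref via at_tos() / at_tos_p1() / at_tos_p2() and then
     // pops all three by adding 3 * Interpreter::stackElementSize to Rstack_top.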
 114 
 115 // 32-bit ARM:
 116 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 117 // separate ldr instructions (supports nonadjacent values).
 118 // Used for longs in all modes, and for doubles in SOFTFP mode.
 119 //
 120 // AArch64: loads long local into R0_tos.
 121 //
 122 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 123   const Register Rlocal_base = tmp;
 124   assert_different_registers(Rlocal_index, tmp);
 125 
 126   get_local_base_addr(Rlocal_base, Rlocal_index);
 127 #ifdef AARCH64
 128   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 129 #else
 130   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 131   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 132 #endif // AARCH64
 133 }
 134 
 135 
 136 // 32-bit ARM:
 137 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 138 // separate str instructions (supports nonadjacent values).
 139 // Used for longs in all modes, and for doubles in SOFTFP mode.
 140 //
 141 // AArch64: stores R0_tos to long local.
 142 //
 143 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 144   const Register Rlocal_base = tmp;
 145   assert_different_registers(Rlocal_index, tmp);
 146 
 147   get_local_base_addr(Rlocal_base, Rlocal_index);
 148 #ifdef AARCH64
 149   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 150 #else
 151   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 152   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 153 #endif // AARCH64
 154 }
 155 
 156 // Returns address of Java array element using temp register as address base.
 157 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 158   int logElemSize = exact_log2(type2aelembytes(elemType));
 159   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 160   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 161 }
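     // In effect the element address is computed as
     //   temp          = array + (index << log2(type2aelembytes(elemType)))
     //   &array[index] = temp + arrayOopDesc::base_offset_in_bytes(elemType)
     // i.e. the scaled index is folded into temp and the array header size is left
     // as the displacement of the returned Address.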
 162 
 163 //----------------------------------------------------------------------------------------------------
 164 // Condition conversion
 165 AsmCondition convNegCond(TemplateTable::Condition cc) {
 166   switch (cc) {
 167     case TemplateTable::equal        : return ne;
 168     case TemplateTable::not_equal    : return eq;
 169     case TemplateTable::less         : return ge;
 170     case TemplateTable::less_equal   : return gt;
 171     case TemplateTable::greater      : return le;
 172     case TemplateTable::greater_equal: return lt;
 173   }
 174   ShouldNotReachHere();
 175   return nv;
 176 }
 177 
 178 //----------------------------------------------------------------------------------------------------
 179 // Miscellaneous helper routines
 180 
 181 // Store an oop (or NULL) at the address described by obj.
 182 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
 183 // Also destroys new_val and obj.base().
 184 static void do_oop_store(InterpreterMacroAssembler* _masm,
 185                          Address obj,
 186                          Register new_val,
 187                          Register tmp1,
 188                          Register tmp2,
 189                          Register tmp3,
 190                          BarrierSet::Name barrier,
 191                          bool precise,
 192                          bool is_null) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   switch (barrier) {
 196 #if INCLUDE_ALL_GCS
 197     case BarrierSet::G1BarrierSet:
 198       {
 199         // flatten object address if needed
 200         assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
 201 
 202         const Register store_addr = obj.base();
 203         if (obj.index() != noreg) {
 204           assert (obj.disp() == 0, "index or displacement, not both");
 205 #ifdef AARCH64
 206           __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
 207 #else
 208           assert(obj.offset_op() == add_offset, "addition is expected");
 209           __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
 210 #endif // AARCH64
 211         } else if (obj.disp() != 0) {
 212           __ add(store_addr, obj.base(), obj.disp());
 213         }
 214 
 215         __ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3);
 216         if (is_null) {
 217           __ store_heap_oop_null(new_val, Address(store_addr));
 218         } else {
 219           // G1 barrier needs uncompressed oop for region cross check.
 220           Register val_to_store = new_val;
 221           if (UseCompressedOops) {
 222             val_to_store = tmp1;
 223             __ mov(val_to_store, new_val);
 224           }
 225           __ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store:
 226           val_to_store = noreg;
 227           __ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3);
 228         }
 229       }
 230       break;
 231 #endif // INCLUDE_ALL_GCS
 232     case BarrierSet::CardTableBarrierSet:
 233       {
 234         if (is_null) {
 235           __ store_heap_oop_null(new_val, obj);
 236         } else {
 237           assert (!precise || (obj.index() == noreg && obj.disp() == 0),
 238                   "store check address should be calculated beforehand");
 239 
 240           __ store_check_part1(tmp1);
 241           __ store_heap_oop(new_val, obj); // blows new_val:
 242           new_val = noreg;
 243           __ store_check_part2(obj.base(), tmp1, tmp2);
 244         }
 245       }
 246       break;
 247     case BarrierSet::ModRef:
 248       ShouldNotReachHere();
 249       break;
 250     default:
 251       ShouldNotReachHere();
 252       break;
 253   }
 254 }
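     // Dispatch summary for do_oop_store(): the G1 path needs the destination
     // flattened into a single base register because both the pre-barrier (which
     // records the old value) and the post-barrier (region cross check / card mark)
     // use it, while the card-table path only emits the post-store card mark via
     // store_check_part1()/store_check_part2().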
 255 
 256 Address TemplateTable::at_bcp(int offset) {
 257   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 258   return Address(Rbcp, offset);
 259 }
 260 
 261 
 262 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 263 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 264                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 265                                    int byte_no) {
 266   assert_different_registers(bc_reg, temp_reg);
 267   if (!RewriteBytecodes)  return;
 268   Label L_patch_done;
 269 
 270   switch (bc) {
 271   case Bytecodes::_fast_aputfield:
 272   case Bytecodes::_fast_bputfield:
 273   case Bytecodes::_fast_zputfield:
 274   case Bytecodes::_fast_cputfield:
 275   case Bytecodes::_fast_dputfield:
 276   case Bytecodes::_fast_fputfield:
 277   case Bytecodes::_fast_iputfield:
 278   case Bytecodes::_fast_lputfield:
 279   case Bytecodes::_fast_sputfield:
 280     {
 281       // We skip bytecode quickening for putfield instructions when
 282       // the put_code written to the constant pool cache is zero.
 283       // This is required so that every execution of this instruction
 284       // calls out to InterpreterRuntime::resolve_get_put to do
 285       // additional, required work.
 286       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 287       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 288       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 289       __ mov(bc_reg, bc);
 290       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 291     }
 292     break;
 293   default:
 294     assert(byte_no == -1, "sanity");
 295     // the pair bytecodes have already done the load.
 296     if (load_bc_into_bc_reg) {
 297       __ mov(bc_reg, bc);
 298     }
 299   }
 300 
 301   if (__ can_post_breakpoint()) {
 302     Label L_fast_patch;
 303     // if a breakpoint is present we can't rewrite the stream directly
 304     __ ldrb(temp_reg, at_bcp(0));
 305     __ cmp(temp_reg, Bytecodes::_breakpoint);
 306     __ b(L_fast_patch, ne);
 307     if (bc_reg != R3) {
 308       __ mov(R3, bc_reg);
 309     }
 310     __ mov(R1, Rmethod);
 311     __ mov(R2, Rbcp);
 312     // Let breakpoint table handling rewrite to quicker bytecode
 313     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 314     __ b(L_patch_done);
 315     __ bind(L_fast_patch);
 316   }
 317 
 318 #ifdef ASSERT
 319   Label L_okay;
 320   __ ldrb(temp_reg, at_bcp(0));
 321   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 322   __ b(L_okay, eq);
 323   __ cmp(temp_reg, bc_reg);
 324   __ b(L_okay, eq);
 325   __ stop("patching the wrong bytecode");
 326   __ bind(L_okay);
 327 #endif
 328 
 329   // patch bytecode
 330   __ strb(bc_reg, at_bcp(0));
 331   __ bind(L_patch_done);
 332 }
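     // Typical use (see iload_internal() below): once the desired fast variant has
     // been selected into a register,
     //   patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
     // overwrites the bytecode at bcp so later executions dispatch straight to the
     // fast template, unless a breakpoint forces the rewrite through the runtime.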
 333 
 334 //----------------------------------------------------------------------------------------------------
 335 // Individual instructions
 336 
 337 void TemplateTable::nop() {
 338   transition(vtos, vtos);
 339   // nothing to do
 340 }
 341 
 342 void TemplateTable::shouldnotreachhere() {
 343   transition(vtos, vtos);
 344   __ stop("shouldnotreachhere bytecode");
 345 }
 346 
 347 
 348 
 349 void TemplateTable::aconst_null() {
 350   transition(vtos, atos);
 351   __ mov(R0_tos, 0);
 352 }
 353 
 354 
 355 void TemplateTable::iconst(int value) {
 356   transition(vtos, itos);
 357   __ mov_slow(R0_tos, value);
 358 }
 359 
 360 
 361 void TemplateTable::lconst(int value) {
 362   transition(vtos, ltos);
 363   assert((value == 0) || (value == 1), "unexpected long constant");
 364   __ mov(R0_tos, value);
 365 #ifndef AARCH64
 366   __ mov(R1_tos_hi, 0);
 367 #endif // !AARCH64
 368 }
 369 
 370 
 371 void TemplateTable::fconst(int value) {
 372   transition(vtos, ftos);
 373 #ifdef AARCH64
 374   switch(value) {
 375   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 376   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 377   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 378   default:  ShouldNotReachHere();      break;
 379   }
 380 #else
 381   const int zero = 0;         // 0.0f
 382   const int one = 0x3f800000; // 1.0f
 383   const int two = 0x40000000; // 2.0f
 384 
 385   switch(value) {
 386   case 0:   __ mov(R0_tos, zero);   break;
 387   case 1:   __ mov(R0_tos, one);    break;
 388   case 2:   __ mov(R0_tos, two);    break;
 389   default:  ShouldNotReachHere();   break;
 390   }
 391 
 392 #ifndef __SOFTFP__
 393   __ fmsr(S0_tos, R0_tos);
 394 #endif // !__SOFTFP__
 395 #endif // AARCH64
 396 }
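     // Note: the 32-bit constants above are the IEEE-754 single-precision bit
     // patterns: 0x3f800000 encodes 1.0f (biased exponent 127, zero mantissa) and
     // 0x40000000 encodes 2.0f; 0.0f is all zero bits.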
 397 
 398 
 399 void TemplateTable::dconst(int value) {
 400   transition(vtos, dtos);
 401 #ifdef AARCH64
 402   switch(value) {
 403   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 404   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 405   default:  ShouldNotReachHere();      break;
 406   }
 407 #else
 408   const int one_lo = 0;            // low part of 1.0
 409   const int one_hi = 0x3ff00000;   // high part of 1.0
 410 
 411   if (value == 0) {
 412 #ifdef __SOFTFP__
 413     __ mov(R0_tos_lo, 0);
 414     __ mov(R1_tos_hi, 0);
 415 #else
 416     __ mov(R0_tmp, 0);
 417     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 418 #endif // __SOFTFP__
 419   } else if (value == 1) {
 420     __ mov(R0_tos_lo, one_lo);
 421     __ mov_slow(R1_tos_hi, one_hi);
 422 #ifndef __SOFTFP__
 423     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 424 #endif // !__SOFTFP__
 425   } else {
 426     ShouldNotReachHere();
 427   }
 428 #endif // AARCH64
 429 }
 430 
 431 
 432 void TemplateTable::bipush() {
 433   transition(vtos, itos);
 434   __ ldrsb(R0_tos, at_bcp(1));
 435 }
 436 
 437 
 438 void TemplateTable::sipush() {
 439   transition(vtos, itos);
 440   __ ldrsb(R0_tmp, at_bcp(1));
 441   __ ldrb(R1_tmp, at_bcp(2));
 442   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 443 }
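     // Equivalent C computation for the two operand bytes (bcp[1] is sign-extended
     // by ldrsb, bcp[2] is zero-extended by ldrb):
     //   R0_tos = ((int8_t)bcp[1] << 8) | bcp[2];   // the sign-extended s2 operand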
 444 
 445 
 446 void TemplateTable::ldc(bool wide) {
 447   transition(vtos, vtos);
 448   Label fastCase, Condy, Done;
 449 
 450   const Register Rindex = R1_tmp;
 451   const Register Rcpool = R2_tmp;
 452   const Register Rtags  = R3_tmp;
 453   const Register RtagType = R3_tmp;
 454 
 455   if (wide) {
 456     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 457   } else {
 458     __ ldrb(Rindex, at_bcp(1));
 459   }
 460   __ get_cpool_and_tags(Rcpool, Rtags);
 461 
 462   const int base_offset = ConstantPool::header_size() * wordSize;
 463   const int tags_offset = Array<u1>::base_offset_in_bytes();
 464 
 465   // get const type
 466   __ add(Rtemp, Rtags, tags_offset);
 467 #ifdef AARCH64
 468   __ add(Rtemp, Rtemp, Rindex);
 469   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or a control dependency is enough
 470 #else
 471   __ ldrb(RtagType, Address(Rtemp, Rindex));
 472   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 473 #endif // AARCH64
 474 
 475   // unresolved class - get the resolved class
 476   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 477 
 478   // unresolved class in error (resolution failed) - call into runtime
 479   // so that the same error from first resolution attempt is thrown.
 480 #ifdef AARCH64
 481   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 482   __ cond_cmp(RtagType, Rtemp, ne);
 483 #else
 484   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 485 #endif // AARCH64
 486 
 487   // resolved class - need to call vm to get java mirror of the class
 488   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 489 
 490   __ b(fastCase, ne);
 491 
 492   // slow case - call runtime
 493   __ mov(R1, wide);
 494   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 495   __ push(atos);
 496   __ b(Done);
 497 
 498   // int, float, String
 499   __ bind(fastCase);
 500 
 501   __ cmp(RtagType, JVM_CONSTANT_Integer);
 502   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 503   __ b(Condy, ne);
 504 
 505   // itos, ftos
 506   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 507   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 508 
 509   // floats and ints are placed on the stack in the same way, so
 510   // we can use push(itos) to transfer a float value without VFP
 511   __ push(itos);
 512   __ b(Done);
 513 
 514   __ bind(Condy);
 515   condy_helper(Done);
 516 
 517   __ bind(Done);
 518 }
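     // Dispatch summary for ldc: class-like tags (resolved, unresolved, or in
     // error) take the slow call into InterpreterRuntime::ldc; Integer and Float
     // tags are loaded directly from the constant pool; any other tag falls through
     // to condy_helper() for dynamically-computed constants.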
 519 
 520 // Fast path for caching oop constants.
 521 void TemplateTable::fast_aldc(bool wide) {
 522   transition(vtos, atos);
 523   int index_size = wide ? sizeof(u2) : sizeof(u1);
 524   Label resolved;
 525 
 526   // We are resolved if the resolved reference cache entry contains a
 527   // non-null object (CallSite, etc.)
 528   assert_different_registers(R0_tos, R2_tmp);
 529   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 530   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 531   __ cbnz(R0_tos, resolved);
 532 
 533   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 534 
 535   // first time invocation - must resolve first
 536   __ mov(R1, (int)bytecode());
 537   __ call_VM(R0_tos, entry, R1);
 538   __ bind(resolved);
 539 
 540   { // Check for the null sentinel.
 541     // If we just called the VM, that already did the mapping for us,
 542     // but it's harmless to retry.
 543     Label notNull;
 544     Register result = R0;
 545     Register tmp = R1;
 546     Register rarg = R2;
 547 
 548     // Stash null_sentinel address to get its value later
 549     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 550     __ ldr(tmp, Address(rarg));
 551     __ cmp(result, tmp);
 552     __ b(notNull, ne);
 553     __ mov(result, 0);  // NULL object reference
 554     __ bind(notNull);
 555   }
 556 
 557   if (VerifyOops) {
 558     __ verify_oop(R0_tos);
 559   }
 560 }
 561 
 562 void TemplateTable::ldc2_w() {
 563   transition(vtos, vtos);
 564   const Register Rtags  = R2_tmp;
 565   const Register Rindex = R3_tmp;
 566   const Register Rcpool = R4_tmp;
 567   const Register Rbase  = R5_tmp;
 568 
 569   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 570 
 571   __ get_cpool_and_tags(Rcpool, Rtags);
 572   const int base_offset = ConstantPool::header_size() * wordSize;
 573   const int tags_offset = Array<u1>::base_offset_in_bytes();
 574 
 575   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 576 
 577   Label Condy, exit;
 578 #ifdef __ABI_HARD__
 579   Label Long;
 580   // get type from tags
 581   __ add(Rtemp, Rtags, tags_offset);
 582   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 583   __ cmp(Rtemp, JVM_CONSTANT_Double);
 584   __ b(Long, ne);
 585   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 586 
 587   __ push(dtos);
 588   __ b(exit);
 589   __ bind(Long);
 590 #endif
 591 
 592   __ cmp(Rtemp, JVM_CONSTANT_Long);
 593   __ b(Condy, ne);
 594 #ifdef AARCH64
 595   __ ldr(R0_tos, Address(Rbase, base_offset));
 596 #else
 597   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 598   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 599 #endif // AARCH64
 600   __ push(ltos);
 601   __ b(exit);
 602 
 603   __ bind(Condy);
 604   condy_helper(exit);
 605 
 606   __ bind(exit);
 607 }
 608 
 609 
 610 void TemplateTable::condy_helper(Label& Done)
 611 {
 612   Register obj   = R0_tmp;
 613   Register rtmp  = R1_tmp;
 614   Register flags = R2_tmp;
 615   Register off   = R3_tmp;
 616 
 617   __ mov(R1, (int) bytecode());
 618   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), R1);
 619   __ get_vm_result_2(flags, rtmp);
 620 
 621   // VMr = obj = base address to find primitive value to push
 622   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 623   __ mov(off, flags);
 624 
 625 #ifdef AARCH64
 626   __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
 627 #else
 628   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 629   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 630 #endif
 631 
 632   const Address field(obj, off);
 633 
 634   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
 635 
 636   switch (bytecode()) {
 637     case Bytecodes::_ldc:
 638     case Bytecodes::_ldc_w:
 639       {
 640         // tos in (itos, ftos, stos, btos, ctos, ztos)
 641         Label notIntFloat, notShort, notByte, notChar, notBool;
 642         __ cmp(flags, itos);
 643         __ cond_cmp(flags, ftos, ne);
 644         __ b(notIntFloat, ne);
 645         __ ldr(R0, field);
 646         __ push(itos);
 647         __ b(Done);
 648 
 649         __ bind(notIntFloat);
 650         __ cmp(flags, stos);
 651         __ b(notShort, ne);
 652         __ ldrsh(R0, field);
 653         __ push(stos);
 654         __ b(Done);
 655 
 656         __ bind(notShort);
 657         __ cmp(flags, btos);
 658         __ b(notByte, ne);
 659         __ ldrsb(R0, field);
 660         __ push(btos);
 661         __ b(Done);
 662 
 663         __ bind(notByte);
 664         __ cmp(flags, ctos);
 665         __ b(notChar, ne);
 666         __ ldrh(R0, field);
 667         __ push(ctos);
 668         __ b(Done);
 669 
 670         __ bind(notChar);
 671         __ cmp(flags, ztos);
 672         __ b(notBool, ne);
 673         __ ldrsb(R0, field);
 674         __ push(ztos);
 675         __ b(Done);
 676 
 677         __ bind(notBool);
 678         break;
 679       }
 680 
 681     case Bytecodes::_ldc2_w:
 682       {
 683         Label notLongDouble;
 684         __ cmp(flags, ltos);
 685         __ cond_cmp(flags, dtos, ne);
 686         __ b(notLongDouble, ne);
 687 
 688 #ifdef AARCH64
 689         __ ldr(R0_tos, field);
 690 #else
 691         __ add(rtmp, obj, wordSize);
 692         __ ldr(R0_tos_lo, Address(obj, off));
 693         __ ldr(R1_tos_hi, Address(rtmp, off));
 694 #endif
 695         __ push(ltos);
 696         __ b(Done);
 697 
 698         __ bind(notLongDouble);
 699 
 700         break;
 701       }
 702 
 703     default:
 704       ShouldNotReachHere();
 705     }
 706 
 707     __ stop("bad ldc/condy");
 708 }
 709 
 710 
 711 void TemplateTable::locals_index(Register reg, int offset) {
 712   __ ldrb(reg, at_bcp(offset));
 713 }
 714 
 715 void TemplateTable::iload() {
 716   iload_internal();
 717 }
 718 
 719 void TemplateTable::nofast_iload() {
 720   iload_internal(may_not_rewrite);
 721 }
 722 
 723 void TemplateTable::iload_internal(RewriteControl rc) {
 724   transition(vtos, itos);
 725 
 726   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 727     Label rewrite, done;
 728     const Register next_bytecode = R1_tmp;
 729     const Register target_bytecode = R2_tmp;
 730 
 731     // get next byte
 732     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 733     // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite the
 734     // last two iloads in a pair.  The next bytecode being fast_iload means that
 735     // the bytecode following it is neither an iload nor a caload, so the
 736     // current and next bytecodes form the last iload pair.
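         // Rewrite outcomes of the comparisons below:
         //   next == _iload       -> leave as is (wait for the pair to complete)
         //   next == _fast_iload  -> rewrite current bytecode to _fast_iload2
         //   next == _caload      -> rewrite current bytecode to _fast_icaload
         //   otherwise            -> rewrite current bytecode to _fast_iload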
 737     __ cmp(next_bytecode, Bytecodes::_iload);
 738     __ b(done, eq);
 739 
 740     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 741     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 742     __ b(rewrite, eq);
 743 
 744     // if _caload, rewrite to fast_icaload
 745     __ cmp(next_bytecode, Bytecodes::_caload);
 746     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 747     __ b(rewrite, eq);
 748 
 749     // rewrite so iload doesn't check again.
 750     __ mov(target_bytecode, Bytecodes::_fast_iload);
 751 
 752     // rewrite
 753     // R2: fast bytecode
 754     __ bind(rewrite);
 755     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 756     __ bind(done);
 757   }
 758 
 759   // Get the local value into tos
 760   const Register Rlocal_index = R1_tmp;
 761   locals_index(Rlocal_index);
 762   Address local = load_iaddress(Rlocal_index, Rtemp);
 763   __ ldr_s32(R0_tos, local);
 764 }
 765 
 766 
 767 void TemplateTable::fast_iload2() {
 768   transition(vtos, itos);
 769   const Register Rlocal_index = R1_tmp;
 770 
 771   locals_index(Rlocal_index);
 772   Address local = load_iaddress(Rlocal_index, Rtemp);
 773   __ ldr_s32(R0_tos, local);
 774   __ push(itos);
 775 
 776   locals_index(Rlocal_index, 3);
 777   local = load_iaddress(Rlocal_index, Rtemp);
 778   __ ldr_s32(R0_tos, local);
 779 }
 780 
 781 void TemplateTable::fast_iload() {
 782   transition(vtos, itos);
 783   const Register Rlocal_index = R1_tmp;
 784 
 785   locals_index(Rlocal_index);
 786   Address local = load_iaddress(Rlocal_index, Rtemp);
 787   __ ldr_s32(R0_tos, local);
 788 }
 789 
 790 
 791 void TemplateTable::lload() {
 792   transition(vtos, ltos);
 793   const Register Rlocal_index = R2_tmp;
 794 
 795   locals_index(Rlocal_index);
 796   load_category2_local(Rlocal_index, R3_tmp);
 797 }
 798 
 799 
 800 void TemplateTable::fload() {
 801   transition(vtos, ftos);
 802   const Register Rlocal_index = R2_tmp;
 803 
 804   // Get the local value into tos
 805   locals_index(Rlocal_index);
 806   Address local = load_faddress(Rlocal_index, Rtemp);
 807 #ifdef __SOFTFP__
 808   __ ldr(R0_tos, local);
 809 #else
 810   __ ldr_float(S0_tos, local);
 811 #endif // __SOFTFP__
 812 }
 813 
 814 
 815 void TemplateTable::dload() {
 816   transition(vtos, dtos);
 817   const Register Rlocal_index = R2_tmp;
 818 
 819   locals_index(Rlocal_index);
 820 
 821 #ifdef __SOFTFP__
 822   load_category2_local(Rlocal_index, R3_tmp);
 823 #else
 824   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 825 #endif // __SOFTFP__
 826 }
 827 
 828 
 829 void TemplateTable::aload() {
 830   transition(vtos, atos);
 831   const Register Rlocal_index = R1_tmp;
 832 
 833   locals_index(Rlocal_index);
 834   Address local = load_aaddress(Rlocal_index, Rtemp);
 835   __ ldr(R0_tos, local);
 836 }
 837 
 838 
 839 void TemplateTable::locals_index_wide(Register reg) {
 840   assert_different_registers(reg, Rtemp);
 841   __ ldrb(Rtemp, at_bcp(2));
 842   __ ldrb(reg, at_bcp(3));
 843   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 844 }
 845 
 846 
 847 void TemplateTable::wide_iload() {
 848   transition(vtos, itos);
 849   const Register Rlocal_index = R2_tmp;
 850 
 851   locals_index_wide(Rlocal_index);
 852   Address local = load_iaddress(Rlocal_index, Rtemp);
 853   __ ldr_s32(R0_tos, local);
 854 }
 855 
 856 
 857 void TemplateTable::wide_lload() {
 858   transition(vtos, ltos);
 859   const Register Rlocal_index = R2_tmp;
 860   const Register Rlocal_base = R3_tmp;
 861 
 862   locals_index_wide(Rlocal_index);
 863   load_category2_local(Rlocal_index, R3_tmp);
 864 }
 865 
 866 
 867 void TemplateTable::wide_fload() {
 868   transition(vtos, ftos);
 869   const Register Rlocal_index = R2_tmp;
 870 
 871   locals_index_wide(Rlocal_index);
 872   Address local = load_faddress(Rlocal_index, Rtemp);
 873 #ifdef __SOFTFP__
 874   __ ldr(R0_tos, local);
 875 #else
 876   __ ldr_float(S0_tos, local);
 877 #endif // __SOFTFP__
 878 }
 879 
 880 
 881 void TemplateTable::wide_dload() {
 882   transition(vtos, dtos);
 883   const Register Rlocal_index = R2_tmp;
 884 
 885   locals_index_wide(Rlocal_index);
 886 #ifdef __SOFTFP__
 887   load_category2_local(Rlocal_index, R3_tmp);
 888 #else
 889   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 890 #endif // __SOFTFP__
 891 }
 892 
 893 
 894 void TemplateTable::wide_aload() {
 895   transition(vtos, atos);
 896   const Register Rlocal_index = R2_tmp;
 897 
 898   locals_index_wide(Rlocal_index);
 899   Address local = load_aaddress(Rlocal_index, Rtemp);
 900   __ ldr(R0_tos, local);
 901 }
 902 
 903 void TemplateTable::index_check(Register array, Register index) {
 904   // Pop ptr into array
 905   __ pop_ptr(array);
 906   index_check_without_pop(array, index);
 907 }
 908 
 909 void TemplateTable::index_check_without_pop(Register array, Register index) {
 910   assert_different_registers(array, index, Rtemp);
 911   // check array
 912   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 913   // check index
 914   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 915   __ cmp_32(index, Rtemp);
 916   if (index != R4_ArrayIndexOutOfBounds_index) {
 917     // convention with generate_ArrayIndexOutOfBounds_handler()
 918     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 919   }
 920   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 921 }
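     // Note: the unsigned 'hs' condition after cmp_32(index, length) covers both
     // index >= length and negative indices (which compare as large unsigned
     // values), so a single branch performs the whole bounds check.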
 922 
 923 
 924 void TemplateTable::iaload() {
 925   transition(itos, itos);
 926   const Register Rarray = R1_tmp;
 927   const Register Rindex = R0_tos;
 928 
 929   index_check(Rarray, Rindex);
 930   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 931 }
 932 
 933 
 934 void TemplateTable::laload() {
 935   transition(itos, ltos);
 936   const Register Rarray = R1_tmp;
 937   const Register Rindex = R0_tos;
 938 
 939   index_check(Rarray, Rindex);
 940 
 941 #ifdef AARCH64
 942   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 943 #else
 944   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 945   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 946   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 947 #endif // AARCH64
 948 }
 949 
 950 
 951 void TemplateTable::faload() {
 952   transition(itos, ftos);
 953   const Register Rarray = R1_tmp;
 954   const Register Rindex = R0_tos;
 955 
 956   index_check(Rarray, Rindex);
 957 
 958   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 959 #ifdef __SOFTFP__
 960   __ ldr(R0_tos, addr);
 961 #else
 962   __ ldr_float(S0_tos, addr);
 963 #endif // __SOFTFP__
 964 }
 965 
 966 
 967 void TemplateTable::daload() {
 968   transition(itos, dtos);
 969   const Register Rarray = R1_tmp;
 970   const Register Rindex = R0_tos;
 971 
 972   index_check(Rarray, Rindex);
 973 
 974 #ifdef __SOFTFP__
 975   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 976   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 977   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 978 #else
 979   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 980 #endif // __SOFTFP__
 981 }
 982 
 983 
 984 void TemplateTable::aaload() {
 985   transition(itos, atos);
 986   const Register Rarray = R1_tmp;
 987   const Register Rindex = R0_tos;
 988 
 989   index_check(Rarray, Rindex);
 990   __ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp));
 991 }
 992 
 993 
 994 void TemplateTable::baload() {
 995   transition(itos, itos);
 996   const Register Rarray = R1_tmp;
 997   const Register Rindex = R0_tos;
 998 
 999   index_check(Rarray, Rindex);
1000   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1001 }
1002 
1003 
1004 void TemplateTable::caload() {
1005   transition(itos, itos);
1006   const Register Rarray = R1_tmp;
1007   const Register Rindex = R0_tos;
1008 
1009   index_check(Rarray, Rindex);
1010   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1011 }
1012 
1013 
1014 // iload followed by caload frequent pair
1015 void TemplateTable::fast_icaload() {
1016   transition(vtos, itos);
1017   const Register Rlocal_index = R1_tmp;
1018   const Register Rarray = R1_tmp;
1019   const Register Rindex = R4_tmp; // index_check prefers index in R4
1020   assert_different_registers(Rlocal_index, Rindex);
1021   assert_different_registers(Rarray, Rindex);
1022 
1023   // load index out of locals
1024   locals_index(Rlocal_index);
1025   Address local = load_iaddress(Rlocal_index, Rtemp);
1026   __ ldr_s32(Rindex, local);
1027 
1028   // get array element
1029   index_check(Rarray, Rindex);
1030   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1031 }
1032 
1033 
1034 void TemplateTable::saload() {
1035   transition(itos, itos);
1036   const Register Rarray = R1_tmp;
1037   const Register Rindex = R0_tos;
1038 
1039   index_check(Rarray, Rindex);
1040   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
1041 }
1042 
1043 
1044 void TemplateTable::iload(int n) {
1045   transition(vtos, itos);
1046   __ ldr_s32(R0_tos, iaddress(n));
1047 }
1048 
1049 
1050 void TemplateTable::lload(int n) {
1051   transition(vtos, ltos);
1052 #ifdef AARCH64
1053   __ ldr(R0_tos, laddress(n));
1054 #else
1055   __ ldr(R0_tos_lo, laddress(n));
1056   __ ldr(R1_tos_hi, haddress(n));
1057 #endif // AARCH64
1058 }
1059 
1060 
1061 void TemplateTable::fload(int n) {
1062   transition(vtos, ftos);
1063 #ifdef __SOFTFP__
1064   __ ldr(R0_tos, faddress(n));
1065 #else
1066   __ ldr_float(S0_tos, faddress(n));
1067 #endif // __SOFTFP__
1068 }
1069 
1070 
1071 void TemplateTable::dload(int n) {
1072   transition(vtos, dtos);
1073 #ifdef __SOFTFP__
1074   __ ldr(R0_tos_lo, laddress(n));
1075   __ ldr(R1_tos_hi, haddress(n));
1076 #else
1077   __ ldr_double(D0_tos, daddress(n));
1078 #endif // __SOFTFP__
1079 }
1080 
1081 
1082 void TemplateTable::aload(int n) {
1083   transition(vtos, atos);
1084   __ ldr(R0_tos, aaddress(n));
1085 }
1086 
1087 void TemplateTable::aload_0() {
1088   aload_0_internal();
1089 }
1090 
1091 void TemplateTable::nofast_aload_0() {
1092   aload_0_internal(may_not_rewrite);
1093 }
1094 
1095 void TemplateTable::aload_0_internal(RewriteControl rc) {
1096   transition(vtos, atos);
1097   // According to bytecode histograms, the pairs:
1098   //
1099   // _aload_0, _fast_igetfield
1100   // _aload_0, _fast_agetfield
1101   // _aload_0, _fast_fgetfield
1102   //
1103   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
1104   // bytecode checks if the next bytecode is either _fast_igetfield,
1105   // _fast_agetfield or _fast_fgetfield and then rewrites the
1106   // current bytecode into a pair bytecode; otherwise it rewrites the current
1107   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
1108   //
1109   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
1110   //       otherwise we may miss an opportunity for a pair.
1111   //
1112   // Also rewrite frequent pairs
1113   //   aload_0, aload_1
1114   //   aload_0, iload_1
1115   // These bytecode pairs need only a small amount of code and are the most profitable to rewrite.
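       // Rewrite outcomes of the checks below:
       //   next == _getfield        -> leave as is (a pair may still form later)
       //   next == _fast_igetfield  -> rewrite to _fast_iaccess_0
       //   next == _fast_agetfield  -> rewrite to _fast_aaccess_0
       //   next == _fast_fgetfield  -> rewrite to _fast_faccess_0
       //   otherwise                -> rewrite to _fast_aload_0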
1116   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1117     Label rewrite, done;
1118     const Register next_bytecode = R1_tmp;
1119     const Register target_bytecode = R2_tmp;
1120 
1121     // get next byte
1122     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1123 
1124     // if _getfield then wait with rewrite
1125     __ cmp(next_bytecode, Bytecodes::_getfield);
1126     __ b(done, eq);
1127 
1128     // if _igetfield then rewrite to _fast_iaccess_0
1129     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1130     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1131     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1132     __ b(rewrite, eq);
1133 
1134     // if _agetfield then rewrite to _fast_aaccess_0
1135     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1136     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1137     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1138     __ b(rewrite, eq);
1139 
1140     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1141     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1142     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1143 
1144     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1145 #ifdef AARCH64
1146     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1147     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1148     __ mov(target_bytecode, Rtemp, eq);
1149 #else
1150     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1151     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1152 #endif // AARCH64
1153 
1154     // rewrite
1155     __ bind(rewrite);
1156     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1157 
1158     __ bind(done);
1159   }
1160 
1161   aload(0);
1162 }
1163 
1164 void TemplateTable::istore() {
1165   transition(itos, vtos);
1166   const Register Rlocal_index = R2_tmp;
1167 
1168   locals_index(Rlocal_index);
1169   Address local = load_iaddress(Rlocal_index, Rtemp);
1170   __ str_32(R0_tos, local);
1171 }
1172 
1173 
1174 void TemplateTable::lstore() {
1175   transition(ltos, vtos);
1176   const Register Rlocal_index = R2_tmp;
1177 
1178   locals_index(Rlocal_index);
1179   store_category2_local(Rlocal_index, R3_tmp);
1180 }
1181 
1182 
1183 void TemplateTable::fstore() {
1184   transition(ftos, vtos);
1185   const Register Rlocal_index = R2_tmp;
1186 
1187   locals_index(Rlocal_index);
1188   Address local = load_faddress(Rlocal_index, Rtemp);
1189 #ifdef __SOFTFP__
1190   __ str(R0_tos, local);
1191 #else
1192   __ str_float(S0_tos, local);
1193 #endif // __SOFTFP__
1194 }
1195 
1196 
1197 void TemplateTable::dstore() {
1198   transition(dtos, vtos);
1199   const Register Rlocal_index = R2_tmp;
1200 
1201   locals_index(Rlocal_index);
1202 
1203 #ifdef __SOFTFP__
1204   store_category2_local(Rlocal_index, R3_tmp);
1205 #else
1206   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1207 #endif // __SOFTFP__
1208 }
1209 
1210 
1211 void TemplateTable::astore() {
1212   transition(vtos, vtos);
1213   const Register Rlocal_index = R1_tmp;
1214 
1215   __ pop_ptr(R0_tos);
1216   locals_index(Rlocal_index);
1217   Address local = load_aaddress(Rlocal_index, Rtemp);
1218   __ str(R0_tos, local);
1219 }
1220 
1221 
1222 void TemplateTable::wide_istore() {
1223   transition(vtos, vtos);
1224   const Register Rlocal_index = R2_tmp;
1225 
1226   __ pop_i(R0_tos);
1227   locals_index_wide(Rlocal_index);
1228   Address local = load_iaddress(Rlocal_index, Rtemp);
1229   __ str_32(R0_tos, local);
1230 }
1231 
1232 
1233 void TemplateTable::wide_lstore() {
1234   transition(vtos, vtos);
1235   const Register Rlocal_index = R2_tmp;
1236   const Register Rlocal_base = R3_tmp;
1237 
1238 #ifdef AARCH64
1239   __ pop_l(R0_tos);
1240 #else
1241   __ pop_l(R0_tos_lo, R1_tos_hi);
1242 #endif // AARCH64
1243 
1244   locals_index_wide(Rlocal_index);
1245   store_category2_local(Rlocal_index, R3_tmp);
1246 }
1247 
1248 
1249 void TemplateTable::wide_fstore() {
1250   wide_istore();
1251 }
1252 
1253 
1254 void TemplateTable::wide_dstore() {
1255   wide_lstore();
1256 }
1257 
1258 
1259 void TemplateTable::wide_astore() {
1260   transition(vtos, vtos);
1261   const Register Rlocal_index = R2_tmp;
1262 
1263   __ pop_ptr(R0_tos);
1264   locals_index_wide(Rlocal_index);
1265   Address local = load_aaddress(Rlocal_index, Rtemp);
1266   __ str(R0_tos, local);
1267 }
1268 
1269 
1270 void TemplateTable::iastore() {
1271   transition(itos, vtos);
1272   const Register Rindex = R4_tmp; // index_check prefers index in R4
1273   const Register Rarray = R3_tmp;
1274   // R0_tos: value
1275 
1276   __ pop_i(Rindex);
1277   index_check(Rarray, Rindex);
1278   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1279 }
1280 
1281 
1282 void TemplateTable::lastore() {
1283   transition(ltos, vtos);
1284   const Register Rindex = R4_tmp; // index_check prefers index in R4
1285   const Register Rarray = R3_tmp;
1286   // R0_tos_lo:R1_tos_hi: value
1287 
1288   __ pop_i(Rindex);
1289   index_check(Rarray, Rindex);
1290 
1291 #ifdef AARCH64
1292   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1293 #else
1294   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1295   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1296   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1297 #endif // AARCH64
1298 }
1299 
1300 
1301 void TemplateTable::fastore() {
1302   transition(ftos, vtos);
1303   const Register Rindex = R4_tmp; // index_check prefers index in R4
1304   const Register Rarray = R3_tmp;
1305   // S0_tos/R0_tos: value
1306 
1307   __ pop_i(Rindex);
1308   index_check(Rarray, Rindex);
1309   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1310 
1311 #ifdef __SOFTFP__
1312   __ str(R0_tos, addr);
1313 #else
1314   __ str_float(S0_tos, addr);
1315 #endif // __SOFTFP__
1316 }
1317 
1318 
1319 void TemplateTable::dastore() {
1320   transition(dtos, vtos);
1321   const Register Rindex = R4_tmp; // index_check prefers index in R4
1322   const Register Rarray = R3_tmp;
1323   // D0_tos / R0_tos_lo:R1_tos_hi: value
1324 
1325   __ pop_i(Rindex);
1326   index_check(Rarray, Rindex);
1327 
1328 #ifdef __SOFTFP__
1329   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1330   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1331   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1332 #else
1333   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1334 #endif // __SOFTFP__
1335 }
1336 
1337 
1338 void TemplateTable::aastore() {
1339   transition(vtos, vtos);
1340   Label is_null, throw_array_store, done;
1341 
1342   const Register Raddr_1   = R1_tmp;
1343   const Register Rvalue_2  = R2_tmp;
1344   const Register Rarray_3  = R3_tmp;
1345   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1346   const Register Rsub_5    = R5_tmp;
1347   const Register Rsuper_LR = LR_tmp;
1348 
1349   // stack: ..., array, index, value
1350   __ ldr(Rvalue_2, at_tos());     // Value
1351   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1352   __ ldr(Rarray_3, at_tos_p2());  // Array
1353 
1354   index_check_without_pop(Rarray_3, Rindex_4);
1355 
1356   // Compute the array base
1357   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1358 
1359   // do array store check - check for NULL value first
1360   __ cbz(Rvalue_2, is_null);
1361 
1362   // Load subklass
1363   __ load_klass(Rsub_5, Rvalue_2);
1364   // Load superklass
1365   __ load_klass(Rtemp, Rarray_3);
1366   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1367 
1368   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1369   // Come here on success
1370 
1371   // Store value
1372   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1373 
1374   // Now store using the appropriate barrier
1375   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false);
1376   __ b(done);
1377 
1378   __ bind(throw_array_store);
1379 
1380   // Come here on failure of subtype check
1381   __ profile_typecheck_failed(R0_tmp);
1382 
1383   // object is at TOS
1384   __ b(Interpreter::_throw_ArrayStoreException_entry);
1385 
1386   // Have a NULL in Rvalue_2, store NULL at array[index].
1387   __ bind(is_null);
1388   __ profile_null_seen(R0_tmp);
1389 
1390   // Store a NULL
1391   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true);
1392 
1393   // Pop stack arguments
1394   __ bind(done);
1395   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1396 }
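     // aastore summary: a null value skips the subtype check and is stored through
     // the is_null variant of do_oop_store(); a non-null value is stored only after
     // gen_subtype_check() proves its klass is a subtype of the array's element
     // klass, otherwise control transfers to the ArrayStoreException entry.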
1397 
1398 
1399 void TemplateTable::bastore() {
1400   transition(itos, vtos);
1401   const Register Rindex = R4_tmp; // index_check prefers index in R4
1402   const Register Rarray = R3_tmp;
1403   // R0_tos: value
1404 
1405   __ pop_i(Rindex);
1406   index_check(Rarray, Rindex);
1407 
1408   // Need to check whether array is boolean or byte
1409   // since both types share the bastore bytecode.
1410   __ load_klass(Rtemp, Rarray);
1411   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1412   Label L_skip;
1413   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1414   __ b(L_skip, eq);
1415   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1416   __ bind(L_skip);
1417   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1418 }
1419 
1420 
1421 void TemplateTable::castore() {
1422   transition(itos, vtos);
1423   const Register Rindex = R4_tmp; // index_check prefers index in R4
1424   const Register Rarray = R3_tmp;
1425   // R0_tos: value
1426 
1427   __ pop_i(Rindex);
1428   index_check(Rarray, Rindex);
1429 
1430   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1431 }
1432 
1433 
1434 void TemplateTable::sastore() {
1435   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1436            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1437          "base offsets for char and short should be equal");
1438   castore();
1439 }
1440 
1441 
1442 void TemplateTable::istore(int n) {
1443   transition(itos, vtos);
1444   __ str_32(R0_tos, iaddress(n));
1445 }
1446 
1447 
1448 void TemplateTable::lstore(int n) {
1449   transition(ltos, vtos);
1450 #ifdef AARCH64
1451   __ str(R0_tos, laddress(n));
1452 #else
1453   __ str(R0_tos_lo, laddress(n));
1454   __ str(R1_tos_hi, haddress(n));
1455 #endif // AARCH64
1456 }
1457 
1458 
1459 void TemplateTable::fstore(int n) {
1460   transition(ftos, vtos);
1461 #ifdef __SOFTFP__
1462   __ str(R0_tos, faddress(n));
1463 #else
1464   __ str_float(S0_tos, faddress(n));
1465 #endif // __SOFTFP__
1466 }
1467 
1468 
1469 void TemplateTable::dstore(int n) {
1470   transition(dtos, vtos);
1471 #ifdef __SOFTFP__
1472   __ str(R0_tos_lo, laddress(n));
1473   __ str(R1_tos_hi, haddress(n));
1474 #else
1475   __ str_double(D0_tos, daddress(n));
1476 #endif // __SOFTFP__
1477 }
1478 
1479 
1480 void TemplateTable::astore(int n) {
1481   transition(vtos, vtos);
1482   __ pop_ptr(R0_tos);
1483   __ str(R0_tos, aaddress(n));
1484 }
1485 
1486 
1487 void TemplateTable::pop() {
1488   transition(vtos, vtos);
1489   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1490 }
1491 
1492 
1493 void TemplateTable::pop2() {
1494   transition(vtos, vtos);
1495   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1496 }
1497 
1498 
1499 void TemplateTable::dup() {
1500   transition(vtos, vtos);
1501   // stack: ..., a
1502   __ load_ptr(0, R0_tmp);
1503   __ push_ptr(R0_tmp);
1504   // stack: ..., a, a
1505 }
1506 
1507 
1508 void TemplateTable::dup_x1() {
1509   transition(vtos, vtos);
1510   // stack: ..., a, b
1511   __ load_ptr(0, R0_tmp);  // load b
1512   __ load_ptr(1, R2_tmp);  // load a
1513   __ store_ptr(1, R0_tmp); // store b
1514   __ store_ptr(0, R2_tmp); // store a
1515   __ push_ptr(R0_tmp);     // push b
1516   // stack: ..., b, a, b
1517 }
1518 
1519 
1520 void TemplateTable::dup_x2() {
1521   transition(vtos, vtos);
1522   // stack: ..., a, b, c
1523   __ load_ptr(0, R0_tmp);   // load c
1524   __ load_ptr(1, R2_tmp);   // load b
1525   __ load_ptr(2, R4_tmp);   // load a
1526 
1527   __ push_ptr(R0_tmp);      // push c
1528 
1529   // stack: ..., a, b, c, c
1530   __ store_ptr(1, R2_tmp);  // store b
1531   __ store_ptr(2, R4_tmp);  // store a
1532   __ store_ptr(3, R0_tmp);  // store c
1533   // stack: ..., c, a, b, c
1534 }
1535 
1536 
1537 void TemplateTable::dup2() {
1538   transition(vtos, vtos);
1539   // stack: ..., a, b
1540   __ load_ptr(1, R0_tmp);  // load a
1541   __ push_ptr(R0_tmp);     // push a
1542   __ load_ptr(1, R0_tmp);  // load b
1543   __ push_ptr(R0_tmp);     // push b
1544   // stack: ..., a, b, a, b
1545 }
1546 
1547 
1548 void TemplateTable::dup2_x1() {
1549   transition(vtos, vtos);
1550 
1551   // stack: ..., a, b, c
1552   __ load_ptr(0, R4_tmp);  // load c
1553   __ load_ptr(1, R2_tmp);  // load b
1554   __ load_ptr(2, R0_tmp);  // load a
1555 
1556   __ push_ptr(R2_tmp);     // push b
1557   __ push_ptr(R4_tmp);     // push c
1558 
1559   // stack: ..., a, b, c, b, c
1560 
1561   __ store_ptr(2, R0_tmp);  // store a
1562   __ store_ptr(3, R4_tmp);  // store c
1563   __ store_ptr(4, R2_tmp);  // store b
1564 
1565   // stack: ..., b, c, a, b, c
1566 }
1567 
1568 
1569 void TemplateTable::dup2_x2() {
1570   transition(vtos, vtos);
1571   // stack: ..., a, b, c, d
1572   __ load_ptr(0, R0_tmp);  // load d
1573   __ load_ptr(1, R2_tmp);  // load c
1574   __ push_ptr(R2_tmp);     // push c
1575   __ push_ptr(R0_tmp);     // push d
1576   // stack: ..., a, b, c, d, c, d
1577   __ load_ptr(4, R4_tmp);  // load b
1578   __ store_ptr(4, R0_tmp); // store d in b
1579   __ store_ptr(2, R4_tmp); // store b in d
1580   // stack: ..., a, d, c, b, c, d
1581   __ load_ptr(5, R4_tmp);  // load a
1582   __ store_ptr(5, R2_tmp); // store c in a
1583   __ store_ptr(3, R4_tmp); // store a in c
1584   // stack: ..., c, d, a, b, c, d
1585 }
1586 
1587 
1588 void TemplateTable::swap() {
1589   transition(vtos, vtos);
1590   // stack: ..., a, b
1591   __ load_ptr(1, R0_tmp);  // load a
1592   __ load_ptr(0, R2_tmp);  // load b
1593   __ store_ptr(0, R0_tmp); // store a in b
1594   __ store_ptr(1, R2_tmp); // store b in a
1595   // stack: ..., b, a
1596 }
1597 
1598 
1599 void TemplateTable::iop2(Operation op) {
1600   transition(itos, itos);
1601   const Register arg1 = R1_tmp;
1602   const Register arg2 = R0_tos;
1603 
1604   __ pop_i(arg1);
1605   switch (op) {
1606     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1607     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1608     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1609     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1610     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1611     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1612 #ifdef AARCH64
1613     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1614     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1615     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1616 #else
1617     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1618     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1619     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1620 #endif // AARCH64
1621     default   : ShouldNotReachHere();
1622   }
1623 }
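     // The '& 0x1f' masking above (and the '& 63' used by the long shifts further
     // below) matches the Java shift semantics of using only the low 5 bits of the
     // count for int shifts and the low 6 bits for long shifts, e.g. (1 << 33) == 2
     // for ints.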
1624 
1625 
1626 void TemplateTable::lop2(Operation op) {
1627   transition(ltos, ltos);
1628 #ifdef AARCH64
1629   const Register arg1 = R1_tmp;
1630   const Register arg2 = R0_tos;
1631 
1632   __ pop_l(arg1);
1633   switch (op) {
1634     case add  : __ add (R0_tos, arg1, arg2); break;
1635     case sub  : __ sub (R0_tos, arg1, arg2); break;
1636     case _and : __ andr(R0_tos, arg1, arg2); break;
1637     case _or  : __ orr (R0_tos, arg1, arg2); break;
1638     case _xor : __ eor (R0_tos, arg1, arg2); break;
1639     default   : ShouldNotReachHere();
1640   }
1641 #else
1642   const Register arg1_lo = R2_tmp;
1643   const Register arg1_hi = R3_tmp;
1644   const Register arg2_lo = R0_tos_lo;
1645   const Register arg2_hi = R1_tos_hi;
1646 
1647   __ pop_l(arg1_lo, arg1_hi);
1648   switch (op) {
1649     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1650     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1651     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1652     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1653     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1654     default : ShouldNotReachHere();
1655   }
1656 #endif // AARCH64
1657 }
1658 
1659 
1660 void TemplateTable::idiv() {
1661   transition(itos, itos);
1662 #ifdef AARCH64
1663   const Register divisor = R0_tos;
1664   const Register dividend = R1_tmp;
1665 
1666   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1667   __ pop_i(dividend);
1668   __ sdiv_w(R0_tos, dividend, divisor);
1669 #else
1670   __ mov(R2, R0_tos);
1671   __ pop_i(R0);
1672   // R0 - dividend
1673   // R2 - divisor
1674   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1675   // R1 - result
1676   __ mov(R0_tos, R1);
1677 #endif // AARCH64
1678 }
1679 
1680 
1681 void TemplateTable::irem() {
1682   transition(itos, itos);
1683 #ifdef AARCH64
1684   const Register divisor = R0_tos;
1685   const Register dividend = R1_tmp;
1686   const Register quotient = R2_tmp;
1687 
1688   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1689   __ pop_i(dividend);
1690   __ sdiv_w(quotient, dividend, divisor);
1691   __ msub_w(R0_tos, divisor, quotient, dividend);
1692 #else
1693   __ mov(R2, R0_tos);
1694   __ pop_i(R0);
1695   // R0 - dividend
1696   // R2 - divisor
1697   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1698   // R0 - remainder
1699 #endif // AARCH64
1700 }
1701 
1702 
1703 void TemplateTable::lmul() {
1704   transition(ltos, ltos);
1705 #ifdef AARCH64
1706   const Register arg1 = R0_tos;
1707   const Register arg2 = R1_tmp;
1708 
1709   __ pop_l(arg2);
1710   __ mul(R0_tos, arg1, arg2);
1711 #else
1712   const Register arg1_lo = R0_tos_lo;
1713   const Register arg1_hi = R1_tos_hi;
1714   const Register arg2_lo = R2_tmp;
1715   const Register arg2_hi = R3_tmp;
1716 
1717   __ pop_l(arg2_lo, arg2_hi);
1718 
1719   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1720 #endif // AARCH64
1721 }
1722 
1723 
1724 void TemplateTable::ldiv() {
1725   transition(ltos, ltos);
1726 #ifdef AARCH64
1727   const Register divisor = R0_tos;
1728   const Register dividend = R1_tmp;
1729 
1730   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1731   __ pop_l(dividend);
1732   __ sdiv(R0_tos, dividend, divisor);
1733 #else
1734   const Register x_lo = R2_tmp;
1735   const Register x_hi = R3_tmp;
1736   const Register y_lo = R0_tos_lo;
1737   const Register y_hi = R1_tos_hi;
1738 
1739   __ pop_l(x_lo, x_hi);
1740 
1741   // check if y = 0
1742   __ orrs(Rtemp, y_lo, y_hi);
1743   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1744   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1745 #endif // AARCH64
1746 }
1747 
1748 
1749 void TemplateTable::lrem() {
1750   transition(ltos, ltos);
1751 #ifdef AARCH64
1752   const Register divisor = R0_tos;
1753   const Register dividend = R1_tmp;
1754   const Register quotient = R2_tmp;
1755 
1756   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1757   __ pop_l(dividend);
1758   __ sdiv(quotient, dividend, divisor);
1759   __ msub(R0_tos, divisor, quotient, dividend);
1760 #else
1761   const Register x_lo = R2_tmp;
1762   const Register x_hi = R3_tmp;
1763   const Register y_lo = R0_tos_lo;
1764   const Register y_hi = R1_tos_hi;
1765 
1766   __ pop_l(x_lo, x_hi);
1767 
1768   // check if y == 0
1769   __ orrs(Rtemp, y_lo, y_hi);
1770   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1771   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1772 #endif // AARCH64
1773 }
1774 
1775 
1776 void TemplateTable::lshl() {
1777   transition(itos, ltos);
1778 #ifdef AARCH64
1779   const Register val = R1_tmp;
1780   const Register shift_cnt = R0_tos;
1781   __ pop_l(val);
1782   __ lslv(R0_tos, val, shift_cnt);
1783 #else
1784   const Register shift_cnt = R4_tmp;
1785   const Register val_lo = R2_tmp;
1786   const Register val_hi = R3_tmp;
1787 
1788   __ pop_l(val_lo, val_hi);
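       // Per the JVM spec the shift distance of a long shift is taken from the
       // low six bits of the int on tos; long_shift is expected to handle
       // distances of 32 or more by moving bits across the two word halves.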
1789   __ andr(shift_cnt, R0_tos, 63);
1790   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1791 #endif // AARCH64
1792 }
1793 
1794 
1795 void TemplateTable::lshr() {
1796   transition(itos, ltos);
1797 #ifdef AARCH64
1798   const Register val = R1_tmp;
1799   const Register shift_cnt = R0_tos;
1800   __ pop_l(val);
1801   __ asrv(R0_tos, val, shift_cnt);
1802 #else
1803   const Register shift_cnt = R4_tmp;
1804   const Register val_lo = R2_tmp;
1805   const Register val_hi = R3_tmp;
1806 
1807   __ pop_l(val_lo, val_hi);
1808   __ andr(shift_cnt, R0_tos, 63);
1809   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1810 #endif // AARCH64
1811 }
1812 
1813 
1814 void TemplateTable::lushr() {
1815   transition(itos, ltos);
1816 #ifdef AARCH64
1817   const Register val = R1_tmp;
1818   const Register shift_cnt = R0_tos;
1819   __ pop_l(val);
1820   __ lsrv(R0_tos, val, shift_cnt);
1821 #else
1822   const Register shift_cnt = R4_tmp;
1823   const Register val_lo = R2_tmp;
1824   const Register val_hi = R3_tmp;
1825 
1826   __ pop_l(val_lo, val_hi);
1827   __ andr(shift_cnt, R0_tos, 63);
1828   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1829 #endif // AARCH64
1830 }
1831 
1832 
1833 void TemplateTable::fop2(Operation op) {
1834   transition(ftos, ftos);
1835 #ifdef __SOFTFP__
1836   __ mov(R1, R0_tos);
1837   __ pop_i(R0);
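       // Soft-float calling convention: both single-precision operands travel in
       // core registers (arg1 in R0, arg2 in R1) and the helpers return the
       // result in R0, which is already R0_tos.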
1838   switch (op) {
1839     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1840     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1841     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1842     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1843     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1844     default : ShouldNotReachHere();
1845   }
1846 #else
1847   const FloatRegister arg1 = S1_tmp;
1848   const FloatRegister arg2 = S0_tos;
1849 
1850   switch (op) {
1851     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1852     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1853     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1854     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1855     case rem:
1856 #ifndef __ABI_HARD__
1857       __ pop_f(arg1);
1858       __ fmrs(R0, arg1);
1859       __ fmrs(R1, arg2);
1860       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1861       __ fmsr(S0_tos, R0);
1862 #else
1863       __ mov_float(S1_reg, arg2);
1864       __ pop_f(S0);
1865       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1866 #endif // !__ABI_HARD__
1867       break;
1868     default : ShouldNotReachHere();
1869   }
1870 #endif // __SOFTFP__
1871 }
1872 
1873 
1874 void TemplateTable::dop2(Operation op) {
1875   transition(dtos, dtos);
1876 #ifdef __SOFTFP__
1877   __ mov(R2, R0_tos_lo);
1878   __ mov(R3, R1_tos_hi);
1879   __ pop_l(R0, R1);
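       // Soft-float: arg1 occupies the register pair R0:R1 (lo:hi), arg2 occupies
       // R2:R3; the helpers return the double result in R0:R1 (= R0_tos_lo:R1_tos_hi).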
1880   switch (op) {
1881     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1882     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1883     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1884     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1885     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1886     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1887     default : ShouldNotReachHere();
1888   }
1889 #else
1890   const FloatRegister arg1 = D1_tmp;
1891   const FloatRegister arg2 = D0_tos;
1892 
1893   switch (op) {
1894     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1895     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1896     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1897     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1898     case rem:
1899 #ifndef __ABI_HARD__
1900       __ pop_d(arg1);
1901       __ fmrrd(R0, R1, arg1);
1902       __ fmrrd(R2, R3, arg2);
1903       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1904       __ fmdrr(D0_tos, R0, R1);
1905 #else
1906       __ mov_double(D1, arg2);
1907       __ pop_d(D0);
1908       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1909 #endif // !__ABI_HARD__
1910       break;
1911     default : ShouldNotReachHere();
1912   }
1913 #endif // __SOFTFP__
1914 }
1915 
1916 
1917 void TemplateTable::ineg() {
1918   transition(itos, itos);
1919   __ neg_32(R0_tos, R0_tos);
1920 }
1921 
1922 
1923 void TemplateTable::lneg() {
1924   transition(ltos, ltos);
1925 #ifdef AARCH64
1926   __ neg(R0_tos, R0_tos);
1927 #else
1928   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1929   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1930 #endif // AARCH64
1931 }
1932 
1933 
1934 void TemplateTable::fneg() {
1935   transition(ftos, ftos);
1936 #ifdef __SOFTFP__
1937   // Invert sign bit
1938   const int sign_mask = 0x80000000;
1939   __ eor(R0_tos, R0_tos, sign_mask);
1940 #else
1941   __ neg_float(S0_tos, S0_tos);
1942 #endif // __SOFTFP__
1943 }
1944 
1945 
1946 void TemplateTable::dneg() {
1947   transition(dtos, dtos);
1948 #ifdef __SOFTFP__
1949   // Invert sign bit in the high part of the double
1950   const int sign_mask_hi = 0x80000000;
1951   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1952 #else
1953   __ neg_double(D0_tos, D0_tos);
1954 #endif // __SOFTFP__
1955 }
1956 
1957 
1958 void TemplateTable::iinc() {
1959   transition(vtos, vtos);
1960   const Register Rconst = R2_tmp;
1961   const Register Rlocal_index = R1_tmp;
1962   const Register Rval = R0_tmp;
1963 
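       // iinc operands: the local variable index is at bcp+1 (read by locals_index)
       // and a signed 8-bit increment is at bcp+2, sign-extended by ldrsb.
       // Conceptually: locals[bcp[1]] += (jbyte)bcp[2];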
1964   __ ldrsb(Rconst, at_bcp(2));
1965   locals_index(Rlocal_index);
1966   Address local = load_iaddress(Rlocal_index, Rtemp);
1967   __ ldr_s32(Rval, local);
1968   __ add(Rval, Rval, Rconst);
1969   __ str_32(Rval, local);
1970 }
1971 
1972 
1973 void TemplateTable::wide_iinc() {
1974   transition(vtos, vtos);
1975   const Register Rconst = R2_tmp;
1976   const Register Rlocal_index = R1_tmp;
1977   const Register Rval = R0_tmp;
1978 
1979   // get constant in Rconst
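       // wide iinc stores a signed 16-bit increment big-endian at bcp+4/bcp+5;
       // loading the high byte with ldrsb and shifting it left by 8 sign-extends
       // the combined value.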
1980   __ ldrsb(R2_tmp, at_bcp(4));
1981   __ ldrb(R3_tmp, at_bcp(5));
1982   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1983 
1984   locals_index_wide(Rlocal_index);
1985   Address local = load_iaddress(Rlocal_index, Rtemp);
1986   __ ldr_s32(Rval, local);
1987   __ add(Rval, Rval, Rconst);
1988   __ str_32(Rval, local);
1989 }
1990 
1991 
1992 void TemplateTable::convert() {
1993   // Checking
1994 #ifdef ASSERT
1995   { TosState tos_in  = ilgl;
1996     TosState tos_out = ilgl;
1997     switch (bytecode()) {
1998       case Bytecodes::_i2l: // fall through
1999       case Bytecodes::_i2f: // fall through
2000       case Bytecodes::_i2d: // fall through
2001       case Bytecodes::_i2b: // fall through
2002       case Bytecodes::_i2c: // fall through
2003       case Bytecodes::_i2s: tos_in = itos; break;
2004       case Bytecodes::_l2i: // fall through
2005       case Bytecodes::_l2f: // fall through
2006       case Bytecodes::_l2d: tos_in = ltos; break;
2007       case Bytecodes::_f2i: // fall through
2008       case Bytecodes::_f2l: // fall through
2009       case Bytecodes::_f2d: tos_in = ftos; break;
2010       case Bytecodes::_d2i: // fall through
2011       case Bytecodes::_d2l: // fall through
2012       case Bytecodes::_d2f: tos_in = dtos; break;
2013       default             : ShouldNotReachHere();
2014     }
2015     switch (bytecode()) {
2016       case Bytecodes::_l2i: // fall through
2017       case Bytecodes::_f2i: // fall through
2018       case Bytecodes::_d2i: // fall through
2019       case Bytecodes::_i2b: // fall through
2020       case Bytecodes::_i2c: // fall through
2021       case Bytecodes::_i2s: tos_out = itos; break;
2022       case Bytecodes::_i2l: // fall through
2023       case Bytecodes::_f2l: // fall through
2024       case Bytecodes::_d2l: tos_out = ltos; break;
2025       case Bytecodes::_i2f: // fall through
2026       case Bytecodes::_l2f: // fall through
2027       case Bytecodes::_d2f: tos_out = ftos; break;
2028       case Bytecodes::_i2d: // fall through
2029       case Bytecodes::_l2d: // fall through
2030       case Bytecodes::_f2d: tos_out = dtos; break;
2031       default             : ShouldNotReachHere();
2032     }
2033     transition(tos_in, tos_out);
2034   }
2035 #endif // ASSERT
2036 
2037   // Conversion
2038   switch (bytecode()) {
2039     case Bytecodes::_i2l:
2040 #ifdef AARCH64
2041       __ sign_extend(R0_tos, R0_tos, 32);
2042 #else
2043       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
2044 #endif // AARCH64
2045       break;
2046 
2047     case Bytecodes::_i2f:
2048 #ifdef AARCH64
2049       __ scvtf_sw(S0_tos, R0_tos);
2050 #else
2051 #ifdef __SOFTFP__
2052       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
2053 #else
2054       __ fmsr(S0_tmp, R0_tos);
2055       __ fsitos(S0_tos, S0_tmp);
2056 #endif // __SOFTFP__
2057 #endif // AARCH64
2058       break;
2059 
2060     case Bytecodes::_i2d:
2061 #ifdef AARCH64
2062       __ scvtf_dw(D0_tos, R0_tos);
2063 #else
2064 #ifdef __SOFTFP__
2065       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
2066 #else
2067       __ fmsr(S0_tmp, R0_tos);
2068       __ fsitod(D0_tos, S0_tmp);
2069 #endif // __SOFTFP__
2070 #endif // AARCH64
2071       break;
2072 
2073     case Bytecodes::_i2b:
2074       __ sign_extend(R0_tos, R0_tos, 8);
2075       break;
2076 
2077     case Bytecodes::_i2c:
2078       __ zero_extend(R0_tos, R0_tos, 16);
2079       break;
2080 
2081     case Bytecodes::_i2s:
2082       __ sign_extend(R0_tos, R0_tos, 16);
2083       break;
2084 
2085     case Bytecodes::_l2i:
2086       /* nothing to do */
2087       break;
2088 
2089     case Bytecodes::_l2f:
2090 #ifdef AARCH64
2091       __ scvtf_sx(S0_tos, R0_tos);
2092 #else
2093       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
2094 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2095       __ fmsr(S0_tos, R0);
2096 #endif // !__SOFTFP__ && !__ABI_HARD__
2097 #endif // AARCH64
2098       break;
2099 
2100     case Bytecodes::_l2d:
2101 #ifdef AARCH64
2102       __ scvtf_dx(D0_tos, R0_tos);
2103 #else
2104       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
2105 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2106       __ fmdrr(D0_tos, R0, R1);
2107 #endif // !__SOFTFP__ && !__ABI_HARD__
2108 #endif // AARCH64
2109       break;
2110 
2111     case Bytecodes::_f2i:
2112 #ifdef AARCH64
2113       __ fcvtzs_ws(R0_tos, S0_tos);
2114 #else
2115 #ifndef __SOFTFP__
2116       __ ftosizs(S0_tos, S0_tos);
2117       __ fmrs(R0_tos, S0_tos);
2118 #else
2119       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
2120 #endif // !__SOFTFP__
2121 #endif // AARCH64
2122       break;
2123 
2124     case Bytecodes::_f2l:
2125 #ifdef AARCH64
2126       __ fcvtzs_xs(R0_tos, S0_tos);
2127 #else
2128 #ifndef __SOFTFP__
2129       __ fmrs(R0_tos, S0_tos);
2130 #endif // !__SOFTFP__
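           // SharedRuntime::f2l implements the saturating conversion required by
           // the JVM spec: NaN becomes 0 and out-of-range values clamp to
           // min_jlong/max_jlong.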
2131       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2132 #endif // AARCH64
2133       break;
2134 
2135     case Bytecodes::_f2d:
2136 #ifdef __SOFTFP__
2137       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2138 #else
2139       __ convert_f2d(D0_tos, S0_tos);
2140 #endif // __SOFTFP__
2141       break;
2142 
2143     case Bytecodes::_d2i:
2144 #ifdef AARCH64
2145       __ fcvtzs_wd(R0_tos, D0_tos);
2146 #else
2147 #ifndef __SOFTFP__
2148       __ ftosizd(Stemp, D0);
2149       __ fmrs(R0, Stemp);
2150 #else
2151       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2152 #endif // !__SOFTFP__
2153 #endif // AARCH64
2154       break;
2155 
2156     case Bytecodes::_d2l:
2157 #ifdef AARCH64
2158       __ fcvtzs_xd(R0_tos, D0_tos);
2159 #else
2160 #ifndef __SOFTFP__
2161       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2162 #endif // !__SOFTFP__
2163       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2164 #endif // AARCH64
2165       break;
2166 
2167     case Bytecodes::_d2f:
2168 #ifdef __SOFTFP__
2169       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2170 #else
2171       __ convert_d2f(S0_tos, D0_tos);
2172 #endif // __SOFTFP__
2173       break;
2174 
2175     default:
2176       ShouldNotReachHere();
2177   }
2178 }
2179 
2180 
2181 void TemplateTable::lcmp() {
2182   transition(ltos, itos);
2183 #ifdef AARCH64
2184   const Register arg1 = R1_tmp;
2185   const Register arg2 = R0_tos;
2186 
2187   __ pop_l(arg1);
2188 
2189   __ cmp(arg1, arg2);
2190   __ cset(R0_tos, gt);               // 1 if '>', else 0
2191   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2192 #else
2193   const Register arg1_lo = R2_tmp;
2194   const Register arg1_hi = R3_tmp;
2195   const Register arg2_lo = R0_tos_lo;
2196   const Register arg2_hi = R1_tos_hi;
2197   const Register res = R4_tmp;
2198 
2199   __ pop_l(arg1_lo, arg1_hi);
2200 
2201   // long compare arg1 with arg2
2202   // result is -1/0/+1 if '<'/'='/'>'
2203   Label done;
2204 
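       // High words are compared as signed values; only when they are equal do
       // the low words decide, compared as unsigned values (lo/hi conditions).
       // mvn(res, 0) writes ~0, i.e. -1. Conceptually:
       //   res = (hi1 != hi2) ? (hi1 < hi2 ? -1 : 1)
       //                      : (lo1 < lo2 ? -1 : (lo1 > lo2 ? 1 : 0));  // lo/hi compares unsigned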
2205   __ mov (res, 0);
2206   __ cmp (arg1_hi, arg2_hi);
2207   __ mvn (res, 0, lt);
2208   __ mov (res, 1, gt);
2209   __ b(done, ne);
2210   __ cmp (arg1_lo, arg2_lo);
2211   __ mvn (res, 0, lo);
2212   __ mov (res, 1, hi);
2213   __ bind(done);
2214   __ mov (R0_tos, res);
2215 #endif // AARCH64
2216 }
2217 
2218 
2219 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2220   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2221 
2222 #ifdef AARCH64
2223   if (is_float) {
2224     transition(ftos, itos);
2225     __ pop_f(S1_tmp);
2226     __ fcmp_s(S1_tmp, S0_tos);
2227   } else {
2228     transition(dtos, itos);
2229     __ pop_d(D1_tmp);
2230     __ fcmp_d(D1_tmp, D0_tos);
2231   }
2232 
2233   if (unordered_result < 0) {
2234     __ cset(R0_tos, gt);               // 1 if '>', else 0
2235     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2236   } else {
2237     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2238     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2239   }
2240 
2241 #else
2242 
2243 #ifdef __SOFTFP__
2244 
2245   if (is_float) {
2246     transition(ftos, itos);
2247     const Register Rx = R0;
2248     const Register Ry = R1;
2249 
2250     __ mov(Ry, R0_tos);
2251     __ pop_i(Rx);
2252 
2253     if (unordered_result == 1) {
2254       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2255     } else {
2256       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2257     }
2258 
2259   } else {
2260 
2261     transition(dtos, itos);
2262     const Register Rx_lo = R0;
2263     const Register Rx_hi = R1;
2264     const Register Ry_lo = R2;
2265     const Register Ry_hi = R3;
2266 
2267     __ mov(Ry_lo, R0_tos_lo);
2268     __ mov(Ry_hi, R1_tos_hi);
2269     __ pop_l(Rx_lo, Rx_hi);
2270 
2271     if (unordered_result == 1) {
2272       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2273     } else {
2274       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2275     }
2276   }
2277 
2278 #else
2279 
2280   if (is_float) {
2281     transition(ftos, itos);
2282     __ pop_f(S1_tmp);
2283     __ fcmps(S1_tmp, S0_tos);
2284   } else {
2285     transition(dtos, itos);
2286     __ pop_d(D1_tmp);
2287     __ fcmpd(D1_tmp, D0_tos);
2288   }
2289 
2290   __ fmstat();
2291 
2292   // comparison result | flag N | flag Z | flag C | flag V
2293   // "<"               |   1    |   0    |   0    |   0
2294   // "=="              |   0    |   1    |   1    |   0
2295   // ">"               |   0    |   0    |   1    |   0
2296   // unordered         |   0    |   0    |   1    |   1
2297 
2298   if (unordered_result < 0) {
2299     __ mov(R0_tos, 1);           // result ==  1 if greater
2300     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2301   } else {
2302     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2303     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2304   }
2305   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2306 #endif // __SOFTFP__
2307 #endif // AARCH64
2308 }
2309 
2310 
2311 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2312 
2313   const Register Rdisp = R0_tmp;
2314   const Register Rbumped_taken_count = R5_tmp;
2315 
2316   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2317 
2318   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2319                              InvocationCounter::counter_offset();
2320   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2321                               InvocationCounter::counter_offset();
2322   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2323 
2324   // Load up R0 with the branch displacement
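       // The displacement is stored big-endian in the bytecode stream (2 bytes,
       // or 4 bytes for goto_w/jsr_w); the most significant byte is loaded with
       // ldrsb so the assembled value ends up properly sign-extended.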
2325   if (is_wide) {
2326     __ ldrsb(R0_tmp, at_bcp(1));
2327     __ ldrb(R1_tmp, at_bcp(2));
2328     __ ldrb(R2_tmp, at_bcp(3));
2329     __ ldrb(R3_tmp, at_bcp(4));
2330     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2331     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2332     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2333   } else {
2334     __ ldrsb(R0_tmp, at_bcp(1));
2335     __ ldrb(R1_tmp, at_bcp(2));
2336     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2337   }
2338 
2339   // Handle all the JSR stuff here, then exit.
2340   // It's much shorter and cleaner than intermingling with the
2341   // non-JSR normal-branch stuff occurring below.
2342   if (is_jsr) {
2343     // compute return address as bci in R1
2344     const Register Rret_addr = R1_tmp;
2345     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2346 
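         // return bci = (Rbcp + <3 or 5>) - (ConstMethod* + codes_offset), i.e.
         // the bci of the bytecode that follows the jsr/jsr_w instruction.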
2347     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2348     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2349     __ sub(Rret_addr, Rret_addr, Rtemp);
2350 
2351     // Load the next target bytecode into R3_bytecode and advance Rbcp
2352 #ifdef AARCH64
2353     __ add(Rbcp, Rbcp, Rdisp);
2354     __ ldrb(R3_bytecode, Address(Rbcp));
2355 #else
2356     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2357 #endif // AARCH64
2358 
2359     // Push return address
2360     __ push_i(Rret_addr);
2361     // jsr returns vtos
2362     __ dispatch_only_noverify(vtos);
2363     return;
2364   }
2365 
2366   // Normal (non-jsr) branch handling
2367 
2368   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2369 #ifdef AARCH64
2370   __ add(Rbcp, Rbcp, Rdisp);
2371   __ ldrb(R3_bytecode, Address(Rbcp));
2372 #else
2373   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2374 #endif // AARCH64
2375 
2376   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2377   Label backedge_counter_overflow;
2378   Label profile_method;
2379   Label dispatch;
2380 
2381   if (UseLoopCounter) {
2382     // increment backedge counter for backward branches
2383     // Rdisp (R0): target offset
2384 
2385     const Register Rcnt = R2_tmp;
2386     const Register Rcounters = R1_tmp;
2387 
2388     // count only if backward branch
2389 #ifdef AARCH64
2390     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2391 #else
2392     __ tst(Rdisp, Rdisp);
2393     __ b(dispatch, pl);
2394 #endif // AARCH64
2395 
2396     if (TieredCompilation) {
2397       Label no_mdo;
2398       int increment = InvocationCounter::count_increment;
2399       if (ProfileInterpreter) {
2400         // Are we profiling?
2401         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2402         __ cbz(Rtemp, no_mdo);
2403         // Increment the MDO backedge counter
2404         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2405                                                   in_bytes(InvocationCounter::counter_offset()));
2406         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2407         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2408                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2409         __ b(dispatch);
2410       }
2411       __ bind(no_mdo);
2412       // Increment backedge counter in MethodCounters*
2413       // Note: Rbumped_taken_count is a callee-saved register for ARM32, but caller-saved for ARM64
2414       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2415                              Rdisp, R3_bytecode,
2416                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2417       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2418       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2419                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2420     } else {
2421       // Increment backedge counter in MethodCounters*
2422       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2423                              Rdisp, R3_bytecode,
2424                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2425       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2426       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2427       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2428 
2429       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2430 #ifdef AARCH64
2431       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2432 #else
2433       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2434 #endif // AARCH64
2435       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2436 
2437       if (ProfileInterpreter) {
2438         // Test to see if we should create a method data oop
2439         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2440         __ ldr_s32(Rtemp, profile_limit);
2441         __ cmp_32(Rcnt, Rtemp);
2442         __ b(dispatch, lt);
2443 
2444         // if no method data exists, go to profile method
2445         __ test_method_data_pointer(R4_tmp, profile_method);
2446 
2447         if (UseOnStackReplacement) {
2448           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2449           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2450           __ ldr_s32(Rtemp, backward_branch_limit);
2451           __ cmp(Rbumped_taken_count, Rtemp);
2452           __ b(dispatch, lo);
2453 
2454           // When ProfileInterpreter is on, the backedge_count comes from the
2455           // MethodData*, whose value does not get reset on the call to
2456           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2457           // routine while the method is being compiled, add a second test to make
2458           // sure the overflow function is called only once every overflow_frequency.
2459           const int overflow_frequency = 1024;
2460 
2461 #ifdef AARCH64
2462           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2463 #else
2464           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2465           assert(overflow_frequency == (1 << 10), "shift by 22 not correct for expected frequency");
2466           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2467 #endif // AARCH64
2468 
2469           __ b(backedge_counter_overflow, eq);
2470         }
2471       } else {
2472         if (UseOnStackReplacement) {
2473           // check for overflow against Rcnt, which is the sum of the counters
2474           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2475           __ ldr_s32(Rtemp, backward_branch_limit);
2476           __ cmp_32(Rcnt, Rtemp);
2477           __ b(backedge_counter_overflow, hs);
2478 
2479         }
2480       }
2481     }
2482     __ bind(dispatch);
2483   }
2484 
2485   if (!UseOnStackReplacement) {
2486     __ bind(backedge_counter_overflow);
2487   }
2488 
2489   // continue with the bytecode @ target
2490   __ dispatch_only(vtos);
2491 
2492   if (UseLoopCounter) {
2493     if (ProfileInterpreter) {
2494       // Out-of-line code to allocate method data oop.
2495       __ bind(profile_method);
2496 
2497       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2498       __ set_method_data_pointer_for_bcp();
2499       // reload next bytecode
2500       __ ldrb(R3_bytecode, Address(Rbcp));
2501       __ b(dispatch);
2502     }
2503 
2504     if (UseOnStackReplacement) {
2505       // invocation counter overflow
2506       __ bind(backedge_counter_overflow);
2507 
2508       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2509       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2510 
2511       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2512       const Register Rnmethod = R0;
2513 
2514       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2515 
2516       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2517 
2518       // nmethod may have been invalidated (VM may block upon call_VM return)
2519       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2520       __ cmp(R1_tmp, nmethod::in_use);
2521       __ b(dispatch, ne);
2522 
2523       // We have the address of an on-stack replacement routine in Rnmethod.
2524       // We need to prepare to execute the OSR method. First we must
2525       // migrate the locals and monitors off of the stack.
2526 
2527       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2528 
2529       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2530 
2531       // R0 is OSR buffer
2532 
2533       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2534       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2535 
2536 #ifdef AARCH64
2537       __ ldp(FP, LR, Address(FP));
2538       __ mov(SP, Rtemp);
2539 #else
2540       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2541       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2542 #endif // AARCH64
2543 
2544       __ jump(R1_tmp);
2545     }
2546   }
2547 }
2548 
2549 
2550 void TemplateTable::if_0cmp(Condition cc) {
2551   transition(itos, vtos);
2552   // assume branch is more often taken than not (loops use backward branches)
2553   Label not_taken;
2554 #ifdef AARCH64
2555   if (cc == equal) {
2556     __ cbnz_w(R0_tos, not_taken);
2557   } else if (cc == not_equal) {
2558     __ cbz_w(R0_tos, not_taken);
2559   } else {
2560     __ cmp_32(R0_tos, 0);
2561     __ b(not_taken, convNegCond(cc));
2562   }
2563 #else
2564   __ cmp_32(R0_tos, 0);
2565   __ b(not_taken, convNegCond(cc));
2566 #endif // AARCH64
2567   branch(false, false);
2568   __ bind(not_taken);
2569   __ profile_not_taken_branch(R0_tmp);
2570 }
2571 
2572 
2573 void TemplateTable::if_icmp(Condition cc) {
2574   transition(itos, vtos);
2575   // assume branch is more often taken than not (loops use backward branches)
2576   Label not_taken;
2577   __ pop_i(R1_tmp);
2578   __ cmp_32(R1_tmp, R0_tos);
2579   __ b(not_taken, convNegCond(cc));
2580   branch(false, false);
2581   __ bind(not_taken);
2582   __ profile_not_taken_branch(R0_tmp);
2583 }
2584 
2585 
2586 void TemplateTable::if_nullcmp(Condition cc) {
2587   transition(atos, vtos);
2588   assert(cc == equal || cc == not_equal, "invalid condition");
2589 
2590   // assume branch is more often taken than not (loops use backward branches)
2591   Label not_taken;
2592   if (cc == equal) {
2593     __ cbnz(R0_tos, not_taken);
2594   } else {
2595     __ cbz(R0_tos, not_taken);
2596   }
2597   branch(false, false);
2598   __ bind(not_taken);
2599   __ profile_not_taken_branch(R0_tmp);
2600 }
2601 
2602 
2603 void TemplateTable::if_acmp(Condition cc) {
2604   transition(atos, vtos);
2605   // assume branch is more often taken than not (loops use backward branches)
2606   Label not_taken;
2607   __ pop_ptr(R1_tmp);
2608   __ cmp(R1_tmp, R0_tos);
2609   __ b(not_taken, convNegCond(cc));
2610   branch(false, false);
2611   __ bind(not_taken);
2612   __ profile_not_taken_branch(R0_tmp);
2613 }
2614 
2615 
2616 void TemplateTable::ret() {
2617   transition(vtos, vtos);
2618   const Register Rlocal_index = R1_tmp;
2619   const Register Rret_bci = Rtmp_save0; // R4/R19
2620 
2621   locals_index(Rlocal_index);
2622   Address local = load_iaddress(Rlocal_index, Rtemp);
2623   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2624   __ profile_ret(Rtmp_save1, Rret_bci);
2625   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2626   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2627   __ add(Rbcp, Rtemp, Rret_bci);
2628   __ dispatch_next(vtos);
2629 }
2630 
2631 
2632 void TemplateTable::wide_ret() {
2633   transition(vtos, vtos);
2634   const Register Rlocal_index = R1_tmp;
2635   const Register Rret_bci = Rtmp_save0; // R4/R19
2636 
2637   locals_index_wide(Rlocal_index);
2638   Address local = load_iaddress(Rlocal_index, Rtemp);
2639   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2640   __ profile_ret(Rtmp_save1, Rret_bci);
2641   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2642   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2643   __ add(Rbcp, Rtemp, Rret_bci);
2644   __ dispatch_next(vtos);
2645 }
2646 
2647 
2648 void TemplateTable::tableswitch() {
2649   transition(itos, vtos);
2650 
2651   const Register Rindex  = R0_tos;
2652 #ifndef AARCH64
2653   const Register Rtemp2  = R1_tmp;
2654 #endif // !AARCH64
2655   const Register Rabcp   = R2_tmp;  // aligned bcp
2656   const Register Rlow    = R3_tmp;
2657   const Register Rhigh   = R4_tmp;
2658   const Register Roffset = R5_tmp;
2659 
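       // tableswitch operands (4-byte big-endian ints): 0-3 pad bytes to a 4-byte
       // boundary, then default, low, high, then (high-low+1) jump offsets.
       // Rabcp is set up below to point at 'low'; after the load-with-writeback it
       // points at the first jump offset, so the default offset sits at
       // Rabcp - 3*BytesPerInt.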
2660   // align bcp
2661   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2662   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2663 
2664   // load lo & hi
2665 #ifdef AARCH64
2666   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2667 #else
2668   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2669 #endif // AARCH64
2670   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2671   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2672 
2673   // compare index with high bound
2674   __ cmp_32(Rhigh, Rindex);
2675 
2676 #ifdef AARCH64
2677   Label default_case, do_dispatch;
2678   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2679   __ b(default_case, lt);
2680 
2681   __ sub_w(Rindex, Rindex, Rlow);
2682   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2683   if(ProfileInterpreter) {
2684     __ sxtw(Rindex, Rindex);
2685     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2686   }
2687   __ b(do_dispatch);
2688 
2689   __ bind(default_case);
2690   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2691   if(ProfileInterpreter) {
2692     __ profile_switch_default(R0_tmp);
2693   }
2694 
2695   __ bind(do_dispatch);
2696 #else
2697 
2698   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2699   __ subs(Rindex, Rindex, Rlow, ge);
2700 
2701   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2702   // ("ge" status accumulated from cmp and subs instructions) then load
2703   // offset from table, otherwise load offset for default case
2704 
2705   if(ProfileInterpreter) {
2706     Label default_case, continue_execution;
2707 
2708     __ b(default_case, lt);
2709     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2710     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2711     __ b(continue_execution);
2712 
2713     __ bind(default_case);
2714     __ profile_switch_default(R0_tmp);
2715     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2716 
2717     __ bind(continue_execution);
2718   } else {
2719     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2720     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2721   }
2722 #endif // AARCH64
2723 
2724   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2725 
2726   // load the next bytecode to R3_bytecode and advance Rbcp
2727 #ifdef AARCH64
2728   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2729   __ ldrb(R3_bytecode, Address(Rbcp));
2730 #else
2731   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2732 #endif // AARCH64
2733   __ dispatch_only(vtos);
2734 
2735 }
2736 
2737 
2738 void TemplateTable::lookupswitch() {
2739   transition(itos, itos);
2740   __ stop("lookupswitch bytecode should have been rewritten");
2741 }
2742 
2743 
2744 void TemplateTable::fast_linearswitch() {
2745   transition(itos, vtos);
2746   Label loop, found, default_case, continue_execution;
2747 
2748   const Register Rkey     = R0_tos;
2749   const Register Rabcp    = R2_tmp;  // aligned bcp
2750   const Register Rdefault = R3_tmp;
2751   const Register Rcount   = R4_tmp;
2752   const Register Roffset  = R5_tmp;
2753 
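       // lookupswitch operands (4-byte big-endian ints): 0-3 pad bytes, the
       // default offset, npairs, then npairs (match, offset) pairs which are
       // searched linearly below.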
2754   // bswap Rkey, so we can avoid bswapping the table entries
2755   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2756 
2757   // align bcp
2758   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2759   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2760 
2761   // load default & counter
2762 #ifdef AARCH64
2763   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2764 #else
2765   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2766 #endif // AARCH64
2767   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2768 
2769 #ifdef AARCH64
2770   __ cbz_w(Rcount, default_case);
2771 #else
2772   __ cmp_32(Rcount, 0);
2773   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2774   __ b(default_case, eq);
2775 #endif // AARCH64
2776 
2777   // table search
2778   __ bind(loop);
2779 #ifdef AARCH64
2780   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2781 #endif // AARCH64
2782   __ cmp_32(Rtemp, Rkey);
2783   __ b(found, eq);
2784   __ subs(Rcount, Rcount, 1);
2785 #ifndef AARCH64
2786   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2787 #endif // !AARCH64
2788   __ b(loop, ne);
2789 
2790   // default case
2791   __ bind(default_case);
2792   __ profile_switch_default(R0_tmp);
2793   __ mov(Roffset, Rdefault);
2794   __ b(continue_execution);
2795 
2796   // entry found -> get offset
2797   __ bind(found);
2798   // Rabcp is already incremented and points to the next entry
2799   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2800   if (ProfileInterpreter) {
2801     // Calculate index of the selected case.
2802     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2803 
2804     // align bcp
2805     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2806     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2807 
2808     // load number of cases
2809     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2810     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2811 
2812     // Selected index = <number of cases> - <current loop count>
2813     __ sub(R1_tmp, R2_tmp, Rcount);
2814     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2815   }
2816 
2817   // continue execution
2818   __ bind(continue_execution);
2819   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2820 
2821   // load the next bytecode to R3_bytecode and advance Rbcp
2822 #ifdef AARCH64
2823   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2824   __ ldrb(R3_bytecode, Address(Rbcp));
2825 #else
2826   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2827 #endif // AARCH64
2828   __ dispatch_only(vtos);
2829 }
2830 
2831 
2832 void TemplateTable::fast_binaryswitch() {
2833   transition(itos, vtos);
2834   // Implementation using the following core algorithm:
2835   //
2836   // int binary_search(int key, LookupswitchPair* array, int n) {
2837   //   // Binary search according to "Methodik des Programmierens" by
2838   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2839   //   int i = 0;
2840   //   int j = n;
2841   //   while (i+1 < j) {
2842   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2843   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2844   //     // where a stands for the array and assuming that the (non-existing)
2845   //     // element a[n] is infinitely big.
2846   //     int h = (i + j) >> 1;
2847   //     // i < h < j
2848   //     if (key < array[h].fast_match()) {
2849   //       j = h;
2850   //     } else {
2851   //       i = h;
2852   //     }
2853   //   }
2854   //   // R: a[i] <= key < a[i+1] or Q
2855   //   // (i.e., if key is within array, i is the correct index)
2856   //   return i;
2857   // }
2858 
2859   // register allocation
2860   const Register key    = R0_tos;                // already set (tosca)
2861   const Register array  = R1_tmp;
2862   const Register i      = R2_tmp;
2863   const Register j      = R3_tmp;
2864   const Register h      = R4_tmp;
2865   const Register val    = R5_tmp;
2866   const Register temp1  = Rtemp;
2867   const Register temp2  = LR_tmp;
2868   const Register offset = R3_tmp;
2869 
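       // Each LookupswitchPair is two 4-byte ints (match, offset), hence indices
       // are scaled by 'lsl 1+LogBytesPerInt'. 'array' points at pair[0]; npairs
       // is at array - BytesPerInt and the default offset at array - 2*BytesPerInt.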
2870   // set 'array' = aligned bcp + 2 ints
2871   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2872   __ align_reg(array, temp1, BytesPerInt);
2873 
2874   // initialize i & j
2875   __ mov(i, 0);                                  // i = 0;
2876   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2877   // Convert j into native byte ordering
2878   __ byteswap_u32(j, temp1, temp2);
2879 
2880   // and start
2881   Label entry;
2882   __ b(entry);
2883 
2884   // binary search loop
2885   { Label loop;
2886     __ bind(loop);
2887     // int h = (i + j) >> 1;
2888     __ add(h, i, j);                             // h = i + j;
2889     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2890     // if (key < array[h].fast_match()) {
2891     //   j = h;
2892     // } else {
2893     //   i = h;
2894     // }
2895 #ifdef AARCH64
2896     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2897     __ ldr_s32(val, Address(temp1));
2898 #else
2899     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2900 #endif // AARCH64
2901     // Convert array[h].match to native byte-ordering before compare
2902     __ byteswap_u32(val, temp1, temp2);
2903     __ cmp_32(key, val);
2904     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2905     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2906     // while (i+1 < j)
2907     __ bind(entry);
2908     __ add(temp1, i, 1);                             // i+1
2909     __ cmp(temp1, j);                                // i+1 < j
2910     __ b(loop, lt);
2911   }
2912 
2913   // end of binary search, result index is i (must check again!)
2914   Label default_case;
2915   // Convert array[i].match to native byte-ordering before compare
2916 #ifdef AARCH64
2917   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2918   __ ldr_s32(val, Address(temp1));
2919 #else
2920   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2921 #endif // AARCH64
2922   __ byteswap_u32(val, temp1, temp2);
2923   __ cmp_32(key, val);
2924   __ b(default_case, ne);
2925 
2926   // entry found
2927   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2928   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2929   __ profile_switch_case(R0, i, R1, i);
2930   __ byteswap_u32(offset, temp1, temp2);
2931 #ifdef AARCH64
2932   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2933   __ ldrb(R3_bytecode, Address(Rbcp));
2934 #else
2935   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2936 #endif // AARCH64
2937   __ dispatch_only(vtos);
2938 
2939   // default case
2940   __ bind(default_case);
2941   __ profile_switch_default(R0);
2942   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2943   __ byteswap_u32(offset, temp1, temp2);
2944 #ifdef AARCH64
2945   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2946   __ ldrb(R3_bytecode, Address(Rbcp));
2947 #else
2948   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2949 #endif // AARCH64
2950   __ dispatch_only(vtos);
2951 }
2952 
2953 
2954 void TemplateTable::_return(TosState state) {
2955   transition(state, state);
2956   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2957 
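       // _return_register_finalizer is the rewritten return of Object.<init>
       // (when RegisterFinalizersAtInit is enabled): if the receiver's class has
       // the JVM_ACC_HAS_FINALIZER flag, register the object with the VM before
       // removing the activation.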
2958   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2959     Label skip_register_finalizer;
2960     assert(state == vtos, "only valid state");
2961     __ ldr(R1, aaddress(0));
2962     __ load_klass(Rtemp, R1);
2963     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2964     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2965 
2966     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2967 
2968     __ bind(skip_register_finalizer);
2969   }
2970 
2971   // Narrow result if state is itos but result type is smaller.
2972   // Need to narrow in the return bytecode rather than in generate_return_entry
2973   // since compiled code callers expect the result to already be narrowed.
2974   if (state == itos) {
2975     __ narrow(R0_tos);
2976   }
2977   __ remove_activation(state, LR);
2978 
2979   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2980 
2981 #ifndef AARCH64
2982   // According to interpreter calling conventions, result is returned in R0/R1,
2983   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2984   // This conversion should be done after remove_activation, as it uses
2985   // push(state) & pop(state) to preserve return value.
2986   __ convert_tos_to_retval(state);
2987 #endif // !AARCH64
2988 
2989   __ ret();
2990 
2991   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2992   __ nop();
2993 }
2994 
2995 
2996 // ----------------------------------------------------------------------------
2997 // Volatile variables demand their effects be made known to all CPUs in
2998 // order.  Store buffers on most chips allow reads & writes to reorder; the
2999 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
3000 // memory barrier (i.e., it's not sufficient that the interpreter does not
3001 // reorder volatile references, the hardware also must not reorder them).
3002 //
3003 // According to the new Java Memory Model (JMM):
3004 // (1) All volatiles are serialized wrt each other.
3005 // ALSO reads & writes act as acquire & release, so:
3006 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
3007 // the read float up to before the read.  It's OK for non-volatile memory refs
3008 // that happen before the volatile read to float down below it.
3009 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
3010 // that happen BEFORE the write float down to after the write.  It's OK for
3011 // non-volatile memory refs that happen after the volatile write to float up
3012 // before it.
3013 //
3014 // We only put in barriers around volatile refs (they are expensive), not
3015 // _between_ memory refs (that would require us to track the flavor of the
3016 // previous memory refs).  Requirements (2) and (3) require some barriers
3017 // before volatile stores and after volatile loads.  These nearly cover
3018 // requirement (1) but miss the volatile-store-volatile-load case.  This final
3019 // case is placed after volatile-stores although it could just as well go
3020 // before volatile-loads.
3021 // TODO-AARCH64: consider removing extra unused parameters
3022 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
3023                                      Register tmp,
3024                                      bool preserve_flags,
3025                                      Register load_tgt) {
3026 #ifdef AARCH64
3027   __ membar(order_constraint);
3028 #else
3029   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
3030 #endif
3031 }
3032 
3033 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
3034 void TemplateTable::resolve_cache_and_index(int byte_no,
3035                                             Register Rcache,
3036                                             Register Rindex,
3037                                             size_t index_size) {
3038   assert_different_registers(Rcache, Rindex, Rtemp);
3039 
3040   Label resolved;
3041   Bytecodes::Code code = bytecode();
3042   switch (code) {
3043   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
3044   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
       default: break;
3045   }
3046 
3047   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
3048   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
3049   __ cmp(Rtemp, code);  // have we resolved this bytecode?
3050   __ b(resolved, eq);
3051 
3052   // resolve first time through
3053   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
3054   __ mov(R1, code);
3055   __ call_VM(noreg, entry, R1);
3056   // Update registers with resolved info
3057   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
3058   __ bind(resolved);
3059 }
3060 
3061 
3062 // The Rcache and Rindex registers must be set before the call
3063 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
3064                                               Register Rindex,
3065                                               Register Roffset,
3066                                               Register Rflags,
3067                                               Register Robj,
3068                                               bool is_static = false) {
3069 
3070   assert_different_registers(Rcache, Rindex, Rtemp);
3071   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
3072 
3073   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3074 
3075   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3076 
3077   // Field offset
3078   __ ldr(Roffset, Address(Rtemp,
3079            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
3080 
3081   // Flags
3082   __ ldr_u32(Rflags, Address(Rtemp,
3083            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3084 
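       // For static fields f1 holds the field holder's Klass*. The static field
       // itself lives in the klass's java mirror (the java.lang.Class instance),
       // which is stored as an OopHandle and therefore resolved below.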
3085   if (is_static) {
3086     __ ldr(Robj, Address(Rtemp,
3087              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
3088     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3089     __ ldr(Robj, Address(Robj, mirror_offset));
3090     __ resolve_oop_handle(Robj);
3091   }
3092 }
3093 
3094 
3095 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
3096 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
3097                                                Register method,
3098                                                Register itable_index,
3099                                                Register flags,
3100                                                bool is_invokevirtual,
3101                                                bool is_invokevfinal/*unused*/,
3102                                                bool is_invokedynamic) {
3103   // setup registers
3104   const Register cache = R2_tmp;
3105   const Register index = R3_tmp;
3106   const Register temp_reg = Rtemp;
3107   assert_different_registers(cache, index, temp_reg);
3108   assert_different_registers(method, itable_index, temp_reg);
3109 
3110   // determine constant pool cache field offsets
3111   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
3112   const int method_offset = in_bytes(
3113     ConstantPoolCache::base_offset() +
3114       ((byte_no == f2_byte)
3115        ? ConstantPoolCacheEntry::f2_offset()
3116        : ConstantPoolCacheEntry::f1_offset()
3117       )
3118     );
3119   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
3120                                     ConstantPoolCacheEntry::flags_offset());
3121   // access constant pool cache fields
3122   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
3123                                     ConstantPoolCacheEntry::f2_offset());
3124 
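       // invokedynamic carries a 4-byte index in the bytecode stream (hence
       // sizeof(u4)); the other invoke bytecodes use a 2-byte index.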
3125   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3126   resolve_cache_and_index(byte_no, cache, index, index_size);
3127     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3128     __ ldr(method, Address(temp_reg, method_offset));
3129 
3130   if (itable_index != noreg) {
3131     __ ldr(itable_index, Address(temp_reg, index_offset));
3132   }
3133   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3134 }
3135 
3136 
3137 // The registers cache and index are expected to be set before the call, and should not be Rtemp.
3138 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3139 // except cache and index registers which are preserved.
3140 void TemplateTable::jvmti_post_field_access(Register Rcache,
3141                                             Register Rindex,
3142                                             bool is_static,
3143                                             bool has_tos) {
3144   assert_different_registers(Rcache, Rindex, Rtemp);
3145 
3146   if (__ can_post_field_access()) {
3147     // Check to see if a field access watch has been set before we take
3148     // the time to call into the VM.
3149 
3150     Label Lcontinue;
3151 
3152     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3153     __ cbz(Rtemp, Lcontinue);
3154 
3155     // cache entry pointer
3156     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3157     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3158     if (is_static) {
3159       __ mov(R1, 0);        // NULL object reference
3160     } else {
3161       __ pop(atos);         // Get the object
3162       __ mov(R1, R0_tos);
3163       __ verify_oop(R1);
3164       __ push(atos);        // Restore stack state
3165     }
3166     // R1: object pointer or NULL
3167     // R2: cache entry pointer
3168     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3169                R1, R2);
3170     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3171 
3172     __ bind(Lcontinue);
3173   }
3174 }
3175 
3176 
3177 void TemplateTable::pop_and_check_object(Register r) {
3178   __ pop_ptr(r);
3179   __ null_check(r, Rtemp);  // for field access must check obj.
3180   __ verify_oop(r);
3181 }
3182 
3183 
3184 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3185   transition(vtos, vtos);
3186 
3187   const Register Roffset  = R2_tmp;
3188   const Register Robj     = R3_tmp;
3189   const Register Rcache   = R4_tmp;
3190   const Register Rflagsav = Rtmp_save0;  // R4/R19
3191   const Register Rindex   = R5_tmp;
3192   const Register Rflags   = R5_tmp;
3193 
3194   const bool gen_volatile_check = os::is_MP();
3195 
3196   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3197   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3198   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3199 
3200   if (gen_volatile_check) {
3201     __ mov(Rflagsav, Rflags);
3202   }
3203 
3204   if (!is_static) pop_and_check_object(Robj);
3205 
3206   Label Done, Lint, Ltable, shouldNotReachHere;
3207   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3208 
3209   // compute type
3210   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3211   // Make sure we don't need to mask flags after the above shift
3212   ConstantPoolCacheEntry::verify_tos_state_shift();
3213 
3214   // There are actually two versions of implementation of getfield/getstatic:
3215   //
3216   // 32-bit ARM:
3217   // 1) Table switch using add(PC,...) instruction (fast_version)
3218   // 2) Table switch using ldr(PC,...) instruction
3219   //
3220   // AArch64:
3221   // 1) Table switch using adr/add/br instructions (fast_version)
3222   // 2) Table switch using adr/ldr/br instructions
3223   //
3224   // The first version requires a fixed-size code block for each case and
3225   // cannot be used in RewriteBytecodes and VerifyOops
3226   // modes.
3227 
3228   // Size of fixed size code block for fast_version
3229   const int log_max_block_size = 2;
3230   const int max_block_size = 1 << log_max_block_size;
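       // Each case below is wrapped in a FixedSizeCodeBlock which, in the fast
       // version, pads the case with nops up to max_block_size instructions, so
       // that advancing PC by tos_state << (log_max_block_size + LogInstructionSize)
       // lands at the start of the matching handler.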
3231 
3232   // Decide if fast version is enabled
3233   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3234 
3235   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3236   // atos requires additional processing in slow version.
3237   // On AArch64 atos and itos cannot be merged.
3238   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3239 
3240   assert(number_of_states == 10, "number of tos states should be equal to 10");
3241 
3242   __ cmp(Rflags, itos);
3243 #ifdef AARCH64
3244   __ b(Lint, eq);
3245 
3246   if(fast_version) {
3247     __ adr(Rtemp, Lbtos);
3248     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3249     __ br(Rtemp);
3250   } else {
3251     __ adr(Rtemp, Ltable);
3252     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3253     __ br(Rtemp);
3254   }
3255 #else
3256   if(atos_merged_with_itos) {
3257     __ cmp(Rflags, atos, ne);
3258   }
3259 
3260   // table switch by type
3261   if(fast_version) {
3262     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3263   } else {
3264     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3265   }
3266 
3267   // jump to itos/atos case
3268   __ b(Lint);
3269 #endif // AARCH64
3270 
3271   // table with addresses for slow version
3272   if (fast_version) {
3273     // nothing to do
3274   } else  {
3275     AARCH64_ONLY(__ align(wordSize));
3276     __ bind(Ltable);
3277     __ emit_address(Lbtos);
3278     __ emit_address(Lztos);
3279     __ emit_address(Lctos);
3280     __ emit_address(Lstos);
3281     __ emit_address(Litos);
3282     __ emit_address(Lltos);
3283     __ emit_address(Lftos);
3284     __ emit_address(Ldtos);
3285     __ emit_address(Latos);
3286   }
3287 
3288 #ifdef ASSERT
3289   int seq = 0;
3290 #endif
3291   // btos
3292   {
3293     assert(btos == seq++, "btos has unexpected value");
3294     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3295     __ bind(Lbtos);
3296     __ ldrsb(R0_tos, Address(Robj, Roffset));
3297     __ push(btos);
3298     // Rewrite bytecode to be faster
3299     if (!is_static && rc == may_rewrite) {
3300       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3301     }
3302     __ b(Done);
3303   }
3304 
3305   // ztos (same as btos for getfield)
3306   {
3307     assert(ztos == seq++, "ztos has unexpected value");
3308     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3309     __ bind(Lztos);
3310     __ ldrsb(R0_tos, Address(Robj, Roffset));
3311     __ push(ztos);
3312     // Rewrite bytecode to be faster (use btos fast getfield)
3313     if (!is_static && rc == may_rewrite) {
3314       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3315     }
3316     __ b(Done);
3317   }
3318 
3319   // ctos
3320   {
3321     assert(ctos == seq++, "ctos has unexpected value");
3322     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3323     __ bind(Lctos);
3324     __ ldrh(R0_tos, Address(Robj, Roffset));
3325     __ push(ctos);
3326     if (!is_static && rc == may_rewrite) {
3327       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3328     }
3329     __ b(Done);
3330   }
3331 
3332   // stos
3333   {
3334     assert(stos == seq++, "stos has unexpected value");
3335     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3336     __ bind(Lstos);
3337     __ ldrsh(R0_tos, Address(Robj, Roffset));
3338     __ push(stos);
3339     if (!is_static && rc == may_rewrite) {
3340       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3341     }
3342     __ b(Done);
3343   }
3344 
3345   // itos
3346   {
3347     assert(itos == seq++, "itos has unexpected value");
3348     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3349     __ bind(Litos);
3350     __ b(shouldNotReachHere);
3351   }
3352 
3353   // ltos
3354   {
3355     assert(ltos == seq++, "ltos has unexpected value");
3356     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3357     __ bind(Lltos);
3358 #ifdef AARCH64
3359     __ ldr(R0_tos, Address(Robj, Roffset));
3360 #else
3361     __ add(Roffset, Robj, Roffset);
3362     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3363 #endif // AARCH64
3364     __ push(ltos);
3365     if (!is_static && rc == may_rewrite) {
3366       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3367     }
3368     __ b(Done);
3369   }
3370 
3371   // ftos
3372   {
3373     assert(ftos == seq++, "ftos has unexpected value");
3374     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3375     __ bind(Lftos);
3376     // floats and ints are placed on the stack in the same way, so
3377     // we can use push(itos) to transfer the value without using VFP
3378     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3379     __ push(itos);
3380     if (!is_static && rc == may_rewrite) {
3381       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3382     }
3383     __ b(Done);
3384   }
3385 
3386   // dtos
3387   {
3388     assert(dtos == seq++, "dtos has unexpected value");
3389     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3390     __ bind(Ldtos);
3391     // doubles and longs are placed on stack in the same way, so
3392     // we can use push(ltos) to transfer value without using VFP
3393 #ifdef AARCH64
3394     __ ldr(R0_tos, Address(Robj, Roffset));
3395 #else
3396     __ add(Rtemp, Robj, Roffset);
3397     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3398 #endif // AARCH64
3399     __ push(ltos);
3400     if (!is_static && rc == may_rewrite) {
3401       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3402     }
3403     __ b(Done);
3404   }
3405 
3406   // atos
3407   {
3408     assert(atos == seq++, "atos has unexpected value");
3409 
3410     // atos case for AArch64 and slow version on 32-bit ARM
3411     if(!atos_merged_with_itos) {
3412       __ bind(Latos);
3413       __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3414       __ push(atos);
3415       // Rewrite bytecode to be faster
3416       if (!is_static && rc == may_rewrite) {
3417         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3418       }
3419       __ b(Done);
3420     }
3421   }
3422 
3423   assert(vtos == seq++, "vtos has unexpected value");
3424 
3425   __ bind(shouldNotReachHere);
3426   __ should_not_reach_here();
3427 
3428   // itos and atos cases are frequent, so it makes sense to move them out of the table switch
3429   // atos case can be merged with itos case (and thus moved out of the table switch) on 32-bit ARM, fast version only
3430 
3431   __ bind(Lint);
3432   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3433   __ push(itos);
3434   // Rewrite bytecode to be faster
3435   if (!is_static && rc == may_rewrite) {
3436     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3437   }
3438 
3439   __ bind(Done);
3440 
3441   if (gen_volatile_check) {
3442     // Check for volatile field
3443     Label notVolatile;
3444     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3445 
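    // A LoadLoad|LoadStore barrier after a volatile read prevents later loads and
    // stores from being reordered before it (acquire semantics required by the JMM).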
3446     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3447 
3448     __ bind(notVolatile);
3449   }
3450 
3451 }
3452 
3453 void TemplateTable::getfield(int byte_no) {
3454   getfield_or_static(byte_no, false);
3455 }
3456 
3457 void TemplateTable::nofast_getfield(int byte_no) {
3458   getfield_or_static(byte_no, false, may_not_rewrite);
3459 }
3460 
3461 void TemplateTable::getstatic(int byte_no) {
3462   getfield_or_static(byte_no, true);
3463 }
3464 
3465 
3466 // The cache and index registers are expected to be set before the call and must not be R1 or Rtemp.
3467 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp and LR,
3468 // except the cache and index registers, which are preserved.
3469 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3470   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3471   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3472 
3473   if (__ can_post_field_modification()) {
3474     // Check to see if a field modification watch has been set before we take
3475     // the time to call into the VM.
3476     Label Lcontinue;
3477 
3478     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3479     __ cbz(Rtemp, Lcontinue);
3480 
3481     if (is_static) {
3482       // Life is simple.  Null out the object pointer.
3483       __ mov(R1, 0);
3484     } else {
3485       // Life is harder. The stack holds the value on top, followed by the object.
3486       // We don't know the size of the value, though; it could be one or two words
3487       // depending on its type. As a result, we must find the type to determine where
3488       // the object is.
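      //
      // Expression stack sketch (based on the offsets used below; top of stack first):
      //   one-word value:             [ value ][ objectref ]            -> objectref at expr_offset_in_bytes(1)
      //   two-word value (ltos/dtos): [ value: 2 slots ][ objectref ]   -> objectref at expr_offset_in_bytes(2)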
3489 
3490       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3491       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3492 
3493       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3494       // Make sure we don't need to mask Rtemp after the above shift
3495       ConstantPoolCacheEntry::verify_tos_state_shift();
3496 
3497       __ cmp(Rtemp, ltos);
3498       __ cond_cmp(Rtemp, dtos, ne);
3499 #ifdef AARCH64
3500       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3501       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3502       __ mov(R1, Rtemp, eq);
3503       __ ldr(R1, Address(Rstack_top, R1));
3504 #else
3505       // two word value (ltos/dtos)
3506       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3507 
3508       // one word value (not ltos, dtos)
3509       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3510 #endif // AARCH64
3511     }
3512 
3513     // cache entry pointer
3514     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3515     __ add(R2, R2, in_bytes(cp_base_offset));
3516 
3517     // object (tos)
3518     __ mov(R3, Rstack_top);
3519 
3520     // R1: object pointer set up above (NULL if static)
3521     // R2: cache entry pointer
3522     // R3: value object on the stack
3523     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3524                R1, R2, R3);
3525     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3526 
3527     __ bind(Lcontinue);
3528   }
3529 }
3530 
3531 
3532 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3533   transition(vtos, vtos);
3534 
3535   const Register Roffset  = R2_tmp;
3536   const Register Robj     = R3_tmp;
3537   const Register Rcache   = R4_tmp;
3538   const Register Rflagsav = Rtmp_save0;  // R4/R19
3539   const Register Rindex   = R5_tmp;
3540   const Register Rflags   = R5_tmp;
3541 
3542   const bool gen_volatile_check = os::is_MP();
3543 
3544   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3545   jvmti_post_field_mod(Rcache, Rindex, is_static);
3546   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3547 
3548   if (gen_volatile_check) {
3549     // Check for volatile field
3550     Label notVolatile;
3551     __ mov(Rflagsav, Rflags);
3552     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3553 
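    // A StoreStore|LoadStore barrier before a volatile write keeps earlier stores and
    // loads from being reordered past it (release semantics required by the JMM).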
3554     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3555 
3556     __ bind(notVolatile);
3557   }
3558 
3559   Label Done, Lint, shouldNotReachHere;
3560   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3561 
3562   // compute type
3563   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3564   // Make sure we don't need to mask flags after the above shift
3565   ConstantPoolCacheEntry::verify_tos_state_shift();
3566 
3567   // There are two implementations of putfield/putstatic:
3568   //
3569   // 32-bit ARM:
3570   // 1) Table switch using add(PC,...) instruction (fast_version)
3571   // 2) Table switch using ldr(PC,...) instruction
3572   //
3573   // AArch64:
3574   // 1) Table switch using adr/add/br instructions (fast_version)
3575   // 2) Table switch using adr/ldr/br instructions
3576   //
3577   // The first version requires a fixed-size code block for each case and
3578   // cannot be used when the bytecode may be rewritten (RewriteBytecodes) or when
3579   // VerifyOops or ZapHighNonSignificantBits is enabled; see fast_version below.
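  //
  // The dispatch scheme is the same as in getfield_or_static() above; the per-case
  // code blocks below are larger (bigger log_max_block_size) because each case also
  // pops the value and, for instance fields, the object.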
3580 
3581   // Size of fixed size code block for fast_version (in instructions)
3582   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3583   const int max_block_size = 1 << log_max_block_size;
3584 
3585   // Decide if fast version is enabled
3586   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3587 
3588   assert(number_of_states == 10, "number of tos states should be equal to 10");
3589 
3590   // itos case is frequent and is moved outside the table switch
3591   __ cmp(Rflags, itos);
3592 
3593 #ifdef AARCH64
3594   __ b(Lint, eq);
3595 
3596   if (fast_version) {
3597     __ adr(Rtemp, Lbtos);
3598     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3599     __ br(Rtemp);
3600   } else {
3601     __ adr(Rtemp, Ltable);
3602     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3603     __ br(Rtemp);
3604   }
3605 #else
3606   // table switch by type
3607   if (fast_version) {
3608     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3609   } else  {
3610     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3611   }
3612 
3613   // jump to itos case
3614   __ b(Lint);
3615 #endif // AARCH64
3616 
3617   // table with addresses for slow version
3618   if (fast_version) {
3619     // nothing to do
3620   } else  {
3621     AARCH64_ONLY(__ align(wordSize));
3622     __ bind(Ltable);
3623     __ emit_address(Lbtos);
3624     __ emit_address(Lztos);
3625     __ emit_address(Lctos);
3626     __ emit_address(Lstos);
3627     __ emit_address(Litos);
3628     __ emit_address(Lltos);
3629     __ emit_address(Lftos);
3630     __ emit_address(Ldtos);
3631     __ emit_address(Latos);
3632   }
3633 
3634 #ifdef ASSERT
3635   int seq = 0;
3636 #endif
3637   // btos
3638   {
3639     assert(btos == seq++, "btos has unexpected value");
3640     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3641     __ bind(Lbtos);
3642     __ pop(btos);
3643     if (!is_static) pop_and_check_object(Robj);
3644     __ strb(R0_tos, Address(Robj, Roffset));
3645     if (!is_static && rc == may_rewrite) {
3646       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3647     }
3648     __ b(Done);
3649   }
3650 
3651   // ztos
3652   {
3653     assert(ztos == seq++, "ztos has unexpected value");
3654     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3655     __ bind(Lztos);
3656     __ pop(ztos);
3657     if (!is_static) pop_and_check_object(Robj);
3658     __ and_32(R0_tos, R0_tos, 1);
3659     __ strb(R0_tos, Address(Robj, Roffset));
3660     if (!is_static && rc == may_rewrite) {
3661       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3662     }
3663     __ b(Done);
3664   }
3665 
3666   // ctos
3667   {
3668     assert(ctos == seq++, "ctos has unexpected value");
3669     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3670     __ bind(Lctos);
3671     __ pop(ctos);
3672     if (!is_static) pop_and_check_object(Robj);
3673     __ strh(R0_tos, Address(Robj, Roffset));
3674     if (!is_static && rc == may_rewrite) {
3675       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3676     }
3677     __ b(Done);
3678   }
3679 
3680   // stos
3681   {
3682     assert(stos == seq++, "stos has unexpected value");
3683     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3684     __ bind(Lstos);
3685     __ pop(stos);
3686     if (!is_static) pop_and_check_object(Robj);
3687     __ strh(R0_tos, Address(Robj, Roffset));
3688     if (!is_static && rc == may_rewrite) {
3689       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3690     }
3691     __ b(Done);
3692   }
3693 
3694   // itos
3695   {
3696     assert(itos == seq++, "itos has unexpected value");
3697     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3698     __ bind(Litos);
3699     __ b(shouldNotReachHere);
3700   }
3701 
3702   // ltos
3703   {
3704     assert(ltos == seq++, "ltos has unexpected value");
3705     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3706     __ bind(Lltos);
3707     __ pop(ltos);
3708     if (!is_static) pop_and_check_object(Robj);
3709 #ifdef AARCH64
3710     __ str(R0_tos, Address(Robj, Roffset));
3711 #else
3712     __ add(Roffset, Robj, Roffset);
3713     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3714 #endif // AARCH64
3715     if (!is_static && rc == may_rewrite) {
3716       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3717     }
3718     __ b(Done);
3719   }
3720 
3721   // ftos
3722   {
3723     assert(ftos == seq++, "ftos has unexpected value");
3724     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3725     __ bind(Lftos);
3726     // floats and ints are placed on stack in the same way, so
3727     // we can use pop(itos) to transfer value without using VFP
3728     __ pop(itos);
3729     if (!is_static) pop_and_check_object(Robj);
3730     __ str_32(R0_tos, Address(Robj, Roffset));
3731     if (!is_static && rc == may_rewrite) {
3732       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3733     }
3734     __ b(Done);
3735   }
3736 
3737   // dtos
3738   {
3739     assert(dtos == seq++, "dtos has unexpected value");
3740     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3741     __ bind(Ldtos);
3742     // doubles and longs are placed on stack in the same way, so
3743     // we can use pop(ltos) to transfer value without using VFP
3744     __ pop(ltos);
3745     if (!is_static) pop_and_check_object(Robj);
3746 #ifdef AARCH64
3747     __ str(R0_tos, Address(Robj, Roffset));
3748 #else
3749     __ add(Rtemp, Robj, Roffset);
3750     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3751 #endif // AARCH64
3752     if (!is_static && rc == may_rewrite) {
3753       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3754     }
3755     __ b(Done);
3756   }
3757 
3758   // atos
3759   {
3760     assert(atos == seq++, "atos has unexpected value");
3761     __ bind(Latos);
3762     __ pop(atos);
3763     if (!is_static) pop_and_check_object(Robj);
3764     // Store into the field
3765     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false);
3766     if (!is_static && rc == may_rewrite) {
3767       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3768     }
3769     __ b(Done);
3770   }
3771 
3772   __ bind(shouldNotReachHere);
3773   __ should_not_reach_here();
3774 
3775   // itos case is frequent and is moved outside the table switch
3776   __ bind(Lint);
3777   __ pop(itos);
3778   if (!is_static) pop_and_check_object(Robj);
3779   __ str_32(R0_tos, Address(Robj, Roffset));
3780   if (!is_static && rc == may_rewrite) {
3781     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3782   }
3783 
3784   __ bind(Done);
3785 
3786   if (gen_volatile_check) {
3787     Label notVolatile;
3788     if (is_static) {
3789       // Just check for volatile. Memory barrier for static final field
3790       // is handled by class initialization.
3791       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3792       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3793       __ bind(notVolatile);
3794     } else {
3795       // Check for volatile field and final field
3796       Label skipMembar;
3797 
3798       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3799                        1 << ConstantPoolCacheEntry::is_final_shift);
3800       __ b(skipMembar, eq);
3801 
3802       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3803 
3804       // StoreLoad barrier after volatile field write
3805       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3806       __ b(skipMembar);
3807 
3808       // StoreStore barrier after final field write
3809       __ bind(notVolatile);
3810       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3811 
3812       __ bind(skipMembar);
3813     }
3814   }
3815 
3816 }
3817 
3818 void TemplateTable::putfield(int byte_no) {
3819   putfield_or_static(byte_no, false);
3820 }
3821 
3822 void TemplateTable::nofast_putfield(int byte_no) {
3823   putfield_or_static(byte_no, false, may_not_rewrite);
3824 }
3825 
3826 void TemplateTable::putstatic(int byte_no) {
3827   putfield_or_static(byte_no, true);
3828 }
3829 
3830 
3831 void TemplateTable::jvmti_post_fast_field_mod() {
3832   // This version of jvmti_post_fast_field_mod() is not used on ARM
3833   Unimplemented();
3834 }
3835 
3836 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3837 // but preserves tosca with the given state.
3838 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3839   if (__ can_post_field_modification()) {
3840     // Check to see if a field modification watch has been set before we take
3841     // the time to call into the VM.
3842     Label done;
3843 
3844     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3845     __ cbz(R2, done);
3846 
3847     __ pop_ptr(R3);               // copy the object pointer from tos
3848     __ verify_oop(R3);
3849     __ push_ptr(R3);              // put the object pointer back on tos
3850 
3851     __ push(state);               // save value on the stack
3852 
3853     // access constant pool cache entry
3854     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3855 
3856     __ mov(R1, R3);
3857     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3858     __ mov(R3, Rstack_top); // put tos addr into R3
3859 
3860     // R1: object pointer copied above
3861     // R2: cache entry pointer
3862     // R3: jvalue object on the stack
3863     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3864 
3865     __ pop(state);                // restore value
3866 
3867     __ bind(done);
3868   }
3869 }
3870 
3871 
3872 void TemplateTable::fast_storefield(TosState state) {
3873   transition(state, vtos);
3874 
3875   ByteSize base = ConstantPoolCache::base_offset();
3876 
3877   jvmti_post_fast_field_mod(state);
3878 
3879   const Register Rcache  = R2_tmp;
3880   const Register Rindex  = R3_tmp;
3881   const Register Roffset = R3_tmp;
3882   const Register Rflags  = Rtmp_save0; // R4/R19
3883   const Register Robj    = R5_tmp;
3884 
3885   const bool gen_volatile_check = os::is_MP();
3886 
3887   // access constant pool cache
3888   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3889 
3890   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3891 
3892   if (gen_volatile_check) {
3893     // load flags to test volatile
3894     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3895   }
3896 
3897   // replace index with field offset from cache entry
3898   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3899 
3900   if (gen_volatile_check) {
3901     // Check for volatile store
3902     Label notVolatile;
3903     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3904 
3905     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3906     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3907 
3908     __ bind(notVolatile);
3909   }
3910 
3911   // Get object from stack
3912   pop_and_check_object(Robj);
3913 
3914   // access field
3915   switch (bytecode()) {
3916     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3917                                      // fall through
3918     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3919     case Bytecodes::_fast_sputfield: // fall through
3920     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3921     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3922 #ifdef AARCH64
3923     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3924     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3925     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3926 #else
3927     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3928                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3929 
3930 #ifdef __SOFTFP__
3931     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3932     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3933                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3934 #else
3935     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3936                                      __ fsts(S0_tos, Address(Robj));          break;
3937     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3938                                      __ fstd(D0_tos, Address(Robj));          break;
3939 #endif // __SOFTFP__
3940 #endif // AARCH64
3941 
3942     case Bytecodes::_fast_aputfield:
3943       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false);
3944       break;
3945 
3946     default:
3947       ShouldNotReachHere();
3948   }
3949 
3950   if (gen_volatile_check) {
3951     Label notVolatile;
3952     Label skipMembar;
3953     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3954                    1 << ConstantPoolCacheEntry::is_final_shift);
3955     __ b(skipMembar, eq);
3956 
3957     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3958 
3959     // StoreLoad barrier after volatile field write
3960     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3961     __ b(skipMembar);
3962 
3963     // StoreStore barrier after final field write
3964     __ bind(notVolatile);
3965     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3966 
3967     __ bind(skipMembar);
3968   }
3969 }
3970 
3971 
3972 void TemplateTable::fast_accessfield(TosState state) {
3973   transition(atos, state);
3974 
3975   // do the JVMTI work here to avoid disturbing the register state below
3976   if (__ can_post_field_access()) {
3977     // Check to see if a field access watch has been set before we take
3978     // the time to call into the VM.
3979     Label done;
3980     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3981     __ cbz(R2, done);
3982     // access constant pool cache entry
3983     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3984     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3985     __ verify_oop(R0_tos);
3986     __ mov(R1, R0_tos);
3987     // R1: object pointer copied above
3988     // R2: cache entry pointer
3989     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3990     __ pop_ptr(R0_tos);   // restore object pointer
3991 
3992     __ bind(done);
3993   }
3994 
3995   const Register Robj    = R0_tos;
3996   const Register Rcache  = R2_tmp;
3997   const Register Rflags  = R2_tmp;
3998   const Register Rindex  = R3_tmp;
3999   const Register Roffset = R3_tmp;
4000 
4001   const bool gen_volatile_check = os::is_MP();
4002 
4003   // access constant pool cache
4004   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
4005   // replace index with field offset from cache entry
4006   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
4007   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
4008 
4009   if (gen_volatile_check) {
4010     // load flags to test volatile
4011     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
4012   }
4013 
4014   __ verify_oop(Robj);
4015   __ null_check(Robj, Rtemp);
4016 
4017   // access field
4018   switch (bytecode()) {
4019     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
4020     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
4021     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
4022     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
4023 #ifdef AARCH64
4024     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
4025     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
4026     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
4027 #else
4028     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
4029                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
4030 #ifdef __SOFTFP__
4031     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
4032     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
4033                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
4034 #else
4035     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
4036     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
4037 #endif // __SOFTFP__
4038 #endif // AARCH64
4039     case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
4040     default:
4041       ShouldNotReachHere();
4042   }
4043 
4044   if (gen_volatile_check) {
4045     // Check for volatile load
4046     Label notVolatile;
4047     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4048 
4049     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
4050     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4051 
4052     __ bind(notVolatile);
4053   }
4054 }
4055 
4056 
4057 void TemplateTable::fast_xaccess(TosState state) {
4058   transition(vtos, state);
4059 
4060   const Register Robj = R1_tmp;
4061   const Register Rcache = R2_tmp;
4062   const Register Rindex = R3_tmp;
4063   const Register Roffset = R3_tmp;
4064   const Register Rflags = R4_tmp;
4065   Label done;
4066 
4067   // get receiver
4068   __ ldr(Robj, aaddress(0));
4069 
4070   // access constant pool cache
4071   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
4072   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
4073   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
4074 
4075   const bool gen_volatile_check = os::is_MP();
4076 
4077   if (gen_volatile_check) {
4078     // load flags to test volatile
4079     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
4080   }
4081 
4082   // make sure the exception is reported in the correct bcp range (getfield is the next instruction)
4083   __ add(Rbcp, Rbcp, 1);
4084   __ null_check(Robj, Rtemp);
4085   __ sub(Rbcp, Rbcp, 1);
4086 
4087 #ifdef AARCH64
4088   if (gen_volatile_check) {
4089     Label notVolatile;
4090     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4091 
4092     __ add(Rtemp, Robj, Roffset);
4093 
4094     if (state == itos) {
4095       __ ldar_w(R0_tos, Rtemp);
4096     } else if (state == atos) {
4097       if (UseCompressedOops) {
4098         __ ldar_w(R0_tos, Rtemp);
4099         __ decode_heap_oop(R0_tos);
4100       } else {
4101         __ ldar(R0_tos, Rtemp);
4102       }
4103       __ verify_oop(R0_tos);
4104     } else if (state == ftos) {
4105       __ ldar_w(R0_tos, Rtemp);
4106       __ fmov_sw(S0_tos, R0_tos);
4107     } else {
4108       ShouldNotReachHere();
4109     }
4110     __ b(done);
4111 
4112     __ bind(notVolatile);
4113   }
4114 #endif // AARCH64
4115 
4116   if (state == itos) {
4117     __ ldr_s32(R0_tos, Address(Robj, Roffset));
4118   } else if (state == atos) {
4119     __ load_heap_oop(R0_tos, Address(Robj, Roffset));
4120     __ verify_oop(R0_tos);
4121   } else if (state == ftos) {
4122 #ifdef AARCH64
4123     __ ldr_s(S0_tos, Address(Robj, Roffset));
4124 #else
4125 #ifdef __SOFTFP__
4126     __ ldr(R0_tos, Address(Robj, Roffset));
4127 #else
4128     __ add(Roffset, Robj, Roffset);
4129     __ flds(S0_tos, Address(Roffset));
4130 #endif // __SOFTFP__
4131 #endif // AARCH64
4132   } else {
4133     ShouldNotReachHere();
4134   }
4135 
4136 #ifndef AARCH64
4137   if (gen_volatile_check) {
4138     // Check for volatile load
4139     Label notVolatile;
4140     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4141 
4142     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4143 
4144     __ bind(notVolatile);
4145   }
4146 #endif // !AARCH64
4147 
4148   __ bind(done);
4149 }
4150 
4151 
4152 
4153 //----------------------------------------------------------------------------------------------------
4154 // Calls
4155 
4156 void TemplateTable::count_calls(Register method, Register temp) {
4157   // implemented elsewhere
4158   ShouldNotReachHere();
4159 }
4160 
4161 
4162 void TemplateTable::prepare_invoke(int byte_no,
4163                                    Register method,  // linked method (or i-klass)
4164                                    Register index,   // itable index, MethodType, etc.
4165                                    Register recv,    // if caller wants to see it
4166                                    Register flags    // if caller wants to test it
4167                                    ) {
4168   // determine flags
4169   const Bytecodes::Code code = bytecode();
4170   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4171   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4172   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4173   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4174   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4175   const bool load_receiver       = (recv != noreg);
4176   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4177   assert(recv  == noreg || recv  == R2, "");
4178   assert(flags == noreg || flags == R3, "");
4179 
4180   // setup registers & access constant pool cache
4181   if (recv  == noreg)  recv  = R2;
4182   if (flags == noreg)  flags = R3;
4183   const Register temp = Rtemp;
4184   const Register ret_type = R1_tmp;
4185   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4186 
4187   // save 'interpreter return address'
4188   __ save_bcp();
4189 
4190   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4191 
4192   // maybe push extra argument
4193   if (is_invokedynamic || is_invokehandle) {
4194     Label L_no_push;
4195     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4196     __ mov(temp, index);
4197     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4198     __ load_resolved_reference_at_index(index, temp);
4199     __ verify_oop(index);
4200     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4201     __ bind(L_no_push);
4202   }
4203 
4204   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4205   if (load_receiver) {
4206     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4207     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4208     __ ldr(recv, recv_addr);
4209     __ verify_oop(recv);
4210   }
4211 
4212   // compute return type
4213   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4214   // Make sure we don't need to mask flags after the above shift
4215   ConstantPoolCacheEntry::verify_tos_state_shift();
4216   // load return address
4217   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4218     __ mov_slow(temp, table);
4219     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4220   }
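  // LR now holds the interpreter's invoke-return entry for the callee's result type;
  // this is where the callee returns after jump_from_interpreted().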
4221 }
4222 
4223 
4224 void TemplateTable::invokevirtual_helper(Register index,
4225                                          Register recv,
4226                                          Register flags) {
4227 
4228   const Register recv_klass = R2_tmp;
4229 
4230   assert_different_registers(index, recv, flags, Rtemp);
4231   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4232 
4233   // Test for an invoke of a final method
4234   Label notFinal;
4235   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4236 
4237   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4238 
4239   // do the call - the index is actually the method to call
4240 
4241   // It's final, need a null check here!
4242   __ null_check(recv, Rtemp);
4243 
4244   // profile this call
4245   __ profile_final_call(R0_tmp);
4246 
4247   __ jump_from_interpreted(Rmethod);
4248 
4249   __ bind(notFinal);
4250 
4251   // get receiver klass
4252   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4253   __ load_klass(recv_klass, recv);
4254 
4255   // profile this call
4256   __ profile_virtual_call(R0_tmp, recv_klass);
4257 
4258   // get target Method* & entry point
4259   const int base = in_bytes(Klass::vtable_start_offset());
4260   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
4261   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4262   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
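  // i.e. Rmethod = *(recv_klass + vtable_start_offset + index * wordSize + method_offset),
  // the Method* stored in the receiver's vtable entry for this index.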
4263   __ jump_from_interpreted(Rmethod);
4264 }
4265 
4266 void TemplateTable::invokevirtual(int byte_no) {
4267   transition(vtos, vtos);
4268   assert(byte_no == f2_byte, "use this argument");
4269 
4270   const Register Rrecv  = R2_tmp;
4271   const Register Rflags = R3_tmp;
4272 
4273   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4274 
4275   // Rmethod: index
4276   // Rrecv:   receiver
4277   // Rflags:  flags
4278   // LR:      return address
4279 
4280   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4281 }
4282 
4283 
4284 void TemplateTable::invokespecial(int byte_no) {
4285   transition(vtos, vtos);
4286   assert(byte_no == f1_byte, "use this argument");
4287   const Register Rrecv  = R2_tmp;
4288   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4289   __ verify_oop(Rrecv);
4290   __ null_check(Rrecv, Rtemp);
4291   // do the call
4292   __ profile_call(Rrecv);
4293   __ jump_from_interpreted(Rmethod);
4294 }
4295 
4296 
4297 void TemplateTable::invokestatic(int byte_no) {
4298   transition(vtos, vtos);
4299   assert(byte_no == f1_byte, "use this argument");
4300   prepare_invoke(byte_no, Rmethod);
4301   // do the call
4302   __ profile_call(R2_tmp);
4303   __ jump_from_interpreted(Rmethod);
4304 }
4305 
4306 
4307 void TemplateTable::fast_invokevfinal(int byte_no) {
4308   transition(vtos, vtos);
4309   assert(byte_no == f2_byte, "use this argument");
4310   __ stop("fast_invokevfinal is not used on ARM");
4311 }
4312 
4313 
4314 void TemplateTable::invokeinterface(int byte_no) {
4315   transition(vtos, vtos);
4316   assert(byte_no == f1_byte, "use this argument");
4317 
4318   const Register Ritable = R1_tmp;
4319   const Register Rrecv   = R2_tmp;
4320   const Register Rinterf = R5_tmp;
4321   const Register Rindex  = R4_tmp;
4322   const Register Rflags  = R3_tmp;
4323   const Register Rklass  = R3_tmp;
4324 
4325   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4326 
4327   // Special case of invokeinterface called for a virtual method of
4328   // java.lang.Object.  See cpCacheOop.cpp for details.
4329   // This code isn't produced by javac, but could be produced by
4330   // another compliant Java compiler.
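  // A hypothetical example: an invokeinterface whose resolved method is declared in
  // java.lang.Object (e.g. hashCode() called through an interface that redeclares it)
  // is linked as a "forced virtual" call and handled by the vtable dispatch below.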
4331   Label notMethod;
4332   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4333 
4334   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4335   __ bind(notMethod);
4336 
4337   // Get receiver klass into Rklass - also a null check
4338   __ load_klass(Rklass, Rrecv);
4339 
4340   Label no_such_interface;
4341 
4342   // Receiver subtype check against REFC.
4343   __ lookup_interface_method(// inputs: rec. class, interface
4344                              Rklass, Rinterf, noreg,
4345                              // outputs:  scan temp. reg1, scan temp. reg2
4346                              noreg, Ritable, Rtemp,
4347                              no_such_interface);
4348 
4349   // profile this call
4350   __ profile_virtual_call(R0_tmp, Rklass);
4351 
4352   // Get declaring interface class from method
4353   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4354   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4355   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4356 
4357   // Get itable index from method
4358   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4359   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
4360   __ neg(Rindex, Rtemp);
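  // Net effect of the add/neg pair above: Rindex = Method::itable_index_max - Rtemp,
  // which is the itable index passed to lookup_interface_method() below.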
4361 
4362   __ lookup_interface_method(// inputs: rec. class, interface
4363                              Rklass, Rinterf, Rindex,
4364                              // outputs:  scan temp. reg1, scan temp. reg2
4365                              Rmethod, Ritable, Rtemp,
4366                              no_such_interface);
4367 
4368   // Rmethod: Method* to call
4369 
4370   // Check for abstract method error
4371   // Note: This should be done more efficiently via a throw_abstract_method_error
4372   //       interpreter entry point and a conditional jump to it in case of a null
4373   //       method.
4374   { Label L;
4375     __ cbnz(Rmethod, L);
4376     // throw exception
4377     // note: must restore interpreter registers to canonical
4378     //       state for exception handling to work correctly!
4379     __ restore_method();
4380     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4381     // the call_VM checks for exception, so we should never return here.
4382     __ should_not_reach_here();
4383     __ bind(L);
4384   }
4385 
4386   // do the call
4387   __ jump_from_interpreted(Rmethod);
4388 
4389   // throw exception
4390   __ bind(no_such_interface);
4391   __ restore_method();
4392   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4393   // the call_VM checks for exception, so we should never return here.
4394   __ should_not_reach_here();
4395 }
4396 
4397 void TemplateTable::invokehandle(int byte_no) {
4398   transition(vtos, vtos);
4399 
4400   // TODO-AARCH64 review register usage
4401   const Register Rrecv  = R2_tmp;
4402   const Register Rmtype = R4_tmp;
4403   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4404 
4405   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4406   __ null_check(Rrecv, Rtemp);
4407 
4408   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4409   // Rmethod: MH.invokeExact_MT method (from f2)
4410 
4411   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4412 
4413   // do the call
4414   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4415   __ mov(Rmethod, R5_method);
4416   __ jump_from_interpreted(Rmethod);
4417 }
4418 
4419 void TemplateTable::invokedynamic(int byte_no) {
4420   transition(vtos, vtos);
4421 
4422   // TODO-AARCH64 review register usage
4423   const Register Rcallsite = R4_tmp;
4424   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4425 
4426   prepare_invoke(byte_no, R5_method, Rcallsite);
4427 
4428   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4429   // Rmethod:   MH.linkToCallSite method (from f2)
4430 
4431   // Note:  Rcallsite is already pushed by prepare_invoke
4432 
4433   if (ProfileInterpreter) {
4434     __ profile_call(R2_tmp);
4435   }
4436 
4437   // do the call
4438   __ mov(Rmethod, R5_method);
4439   __ jump_from_interpreted(Rmethod);
4440 }
4441 
4442 //----------------------------------------------------------------------------------------------------
4443 // Allocation
4444 
4445 void TemplateTable::_new() {
4446   transition(vtos, atos);
4447 
4448   const Register Robj   = R0_tos;
4449   const Register Rcpool = R1_tmp;
4450   const Register Rindex = R2_tmp;
4451   const Register Rtags  = R3_tmp;
4452   const Register Rsize  = R3_tmp;
4453 
4454   Register Rklass = R4_tmp;
4455   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4456   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4457 
4458   Label slow_case;
4459   Label done;
4460   Label initialize_header;
4461   Label initialize_object;  // including clearing the fields
4462 
4463   const bool allow_shared_alloc =
4464     Universe::heap()->supports_inline_contig_alloc();
4465 
4466   // Literals
4467   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4468 
4469   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4470   __ get_cpool_and_tags(Rcpool, Rtags);
4471 
4472   // Make sure the class we're about to instantiate has been resolved.
4473   // This is done before loading the InstanceKlass to be consistent with the order
4474   // in which the ConstantPool is updated (see ConstantPool::klass_at_put)
4475   const int tags_offset = Array<u1>::base_offset_in_bytes();
4476   __ add(Rtemp, Rtags, Rindex);
4477 
4478 #ifdef AARCH64
4479   __ add(Rtemp, Rtemp, tags_offset);
4480   __ ldarb(Rtemp, Rtemp);
4481 #else
4482   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4483 
4484   // use Rklass as a scratch
4485   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4486 #endif // AARCH64
4487 
4488   // get InstanceKlass
4489   __ cmp(Rtemp, JVM_CONSTANT_Class);
4490   __ b(slow_case, ne);
4491   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4492 
4493   // make sure klass is initialized & doesn't have finalizer
4494   // make sure klass is fully initialized
4495   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4496   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4497   __ b(slow_case, ne);
4498 
4499   // get instance_size in InstanceKlass (scaled to a count of bytes)
4500   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4501 
4502   // test to see if it has a finalizer or is malformed in some way
4503   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4504   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4505 
4506   // Allocate the instance:
4507   //  If TLAB is enabled:
4508   //    Try to allocate in the TLAB.
4509   //    If fails, go to the slow path.
4510   //  Else If inline contiguous allocations are enabled:
4511   //    Try to allocate in eden.
4512   //    If fails due to heap end, go to slow path.
4513   //
4514   //  If TLAB is enabled OR inline contiguous is enabled:
4515   //    Initialize the allocation.
4516   //    Exit.
4517   //
4518   //  Go to slow path.
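  //
  // Rough C-style sketch of the TLAB fast path generated below (using the JavaThread
  // TLAB fields referenced by the loads and stores that follow):
  //   obj     = thread->tlab_top;
  //   new_top = obj + size_in_bytes;
  //   if (new_top > thread->tlab_end) goto slow_case;
  //   thread->tlab_top = new_top;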
4519   if (UseTLAB) {
4520     const Register Rtlab_top = R1_tmp;
4521     const Register Rtlab_end = R2_tmp;
4522     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4523 
4524     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4525     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4526     __ add(Rtlab_top, Robj, Rsize);
4527     __ cmp(Rtlab_top, Rtlab_end);
4528     __ b(slow_case, hi);
4529     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4530     if (ZeroTLAB) {
4531       // the fields have been already cleared
4532       __ b(initialize_header);
4533     } else {
4534       // initialize both the header and fields
4535       __ b(initialize_object);
4536     }
4537   } else {
4538     // Allocation in the shared Eden, if allowed.
4539     if (allow_shared_alloc) {
4540       const Register Rheap_top_addr = R2_tmp;
4541       const Register Rheap_top = R5_tmp;
4542       const Register Rheap_end = Rtemp;
4543       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4544 
4545       // Rheap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4546       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4547 
4548       Label retry;
4549       __ bind(retry);
4550 
4551 #ifdef AARCH64
4552       __ ldxr(Robj, Rheap_top_addr);
4553 #else
4554       __ ldr(Robj, Address(Rheap_top_addr));
4555 #endif // AARCH64
4556 
4557       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4558       __ add(Rheap_top, Robj, Rsize);
4559       __ cmp(Rheap_top, Rheap_end);
4560       __ b(slow_case, hi);
4561 
4562       // Update heap top atomically.
4563       // If someone beats us on the allocation, try again, otherwise continue.
4564 #ifdef AARCH64
4565       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4566       __ cbnz_w(Rtemp2, retry);
4567 #else
4568       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4569       __ b(retry, ne);
4570 #endif // AARCH64
4571 
4572       __ incr_allocated_bytes(Rsize, Rtemp);
4573     }
4574   }
4575 
4576   if (UseTLAB || allow_shared_alloc) {
4577     const Register Rzero0 = R1_tmp;
4578     const Register Rzero1 = R2_tmp;
4579     const Register Rzero_end = R5_tmp;
4580     const Register Rzero_cur = Rtemp;
4581     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4582 
4583     // The object fields are initialized before the header.  If there are no fields
4584     // to clear (instance size equals the header size), go directly to the header initialization.
4585     __ bind(initialize_object);
4586     __ subs(Rsize, Rsize, sizeof(oopDesc));
4587     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4588     __ b(initialize_header, eq);
4589 
4590 #ifdef ASSERT
4591     // make sure Rsize is a multiple of 8
4592     Label L;
4593     __ tst(Rsize, 0x07);
4594     __ b(L, eq);
4595     __ stop("object size is not multiple of 8 - adjust this code");
4596     __ bind(L);
4597 #endif
4598 
4599 #ifdef AARCH64
4600     {
4601       Label loop;
4602       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4603       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4604       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4605       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4606 
4607       // Zero by 2 words per iteration.
4608       __ bind(loop);
4609       __ subs(Rsize, Rsize, 2*wordSize);
4610       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4611       __ b(loop, gt);
4612     }
4613 #else
4614     __ mov(Rzero0, 0);
4615     __ mov(Rzero1, 0);
4616     __ add(Rzero_end, Rzero_cur, Rsize);
4617 
4618     // initialize remaining object fields: Rsize was a multiple of 8
4619     { Label loop;
4620       // loop is unrolled 2 times
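      // each stmia writes the two zero registers (8 bytes) and post-increments Rzero_cur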
4621       __ bind(loop);
4622       // #1
4623       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4624       __ cmp(Rzero_cur, Rzero_end);
4625       // #2
4626       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4627       __ cmp(Rzero_cur, Rzero_end, ne);
4628       __ b(loop, ne);
4629     }
4630 #endif // AARCH64
4631 
4632     // initialize object header only.
4633     __ bind(initialize_header);
4634     if (UseBiasedLocking) {
4635       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4636     } else {
4637       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4638     }
4639     // mark
4640     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4641 
4642     // klass
4643 #ifdef AARCH64
4644     __ store_klass_gap(Robj);
4645 #endif // AARCH64
4646     __ store_klass(Rklass, Robj); // blows Rklass:
4647     Rklass = noreg;
4648 
4649     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4650     if (DTraceAllocProbes) {
4651       // Trigger dtrace event for fastpath
4652       Label Lcontinue;
4653 
4654       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4655       __ cbz(Rtemp, Lcontinue);
4656 
4657       __ push(atos);
4658       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4659       __ pop(atos);
4660 
4661       __ bind(Lcontinue);
4662     }
4663 
4664     __ b(done);
4665   } else {
4666     // jump over literals
4667     __ b(slow_case);
4668   }
4669 
4670   if (allow_shared_alloc) {
4671     __ bind_literal(Lheap_top_addr);
4672   }
4673 
4674   // slow case
4675   __ bind(slow_case);
4676   __ get_constant_pool(Rcpool);
4677   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4678   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4679 
4680   // continue
4681   __ bind(done);
4682 
4683   // StoreStore barrier required after complete initialization
4684   // (headers + content zeroing), before the object may escape.
4685   __ membar(MacroAssembler::StoreStore, R1_tmp);
4686 }
4687 
4688 
4689 void TemplateTable::newarray() {
4690   transition(itos, atos);
4691   __ ldrb(R1, at_bcp(1));
4692   __ mov(R2, R0_tos);
4693   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4694   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4695 }
4696 
4697 
4698 void TemplateTable::anewarray() {
4699   transition(itos, atos);
4700   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4701   __ get_constant_pool(R1);
4702   __ mov(R3, R0_tos);
4703   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4704   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4705 }
4706 
4707 
4708 void TemplateTable::arraylength() {
4709   transition(atos, itos);
4710   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4711   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4712 }
4713 
4714 
4715 void TemplateTable::checkcast() {
4716   transition(atos, atos);
4717   Label done, is_null, quicked, resolved, throw_exception;
4718 
4719   const Register Robj = R0_tos;
4720   const Register Rcpool = R2_tmp;
4721   const Register Rtags = R3_tmp;
4722   const Register Rindex = R4_tmp;
4723   const Register Rsuper = R3_tmp;
4724   const Register Rsub   = R4_tmp;
4725   const Register Rsubtype_check_tmp1 = R1_tmp;
4726   const Register Rsubtype_check_tmp2 = LR_tmp;
4727 
4728   __ cbz(Robj, is_null);
4729 
4730   // Get cpool & tags index
4731   __ get_cpool_and_tags(Rcpool, Rtags);
4732   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4733 
4734   // See if bytecode has already been quicked
4735   __ add(Rtemp, Rtags, Rindex);
4736 #ifdef AARCH64
4737   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4738   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4739   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4740 #else
4741   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4742 #endif // AARCH64
4743 
4744   __ cmp(Rtemp, JVM_CONSTANT_Class);
4745 
4746 #ifndef AARCH64
4747   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4748 #endif // !AARCH64
4749 
4750   __ b(quicked, eq);
4751 
4752   __ push(atos);
4753   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4754   // vm_result_2 has metadata result
4755   __ get_vm_result_2(Rsuper, Robj);
4756   __ pop_ptr(Robj);
4757   __ b(resolved);
4758 
4759   __ bind(throw_exception);
4760   // Come here on failure of subtype check
4761   __ profile_typecheck_failed(R1_tmp);
4762   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4763   __ b(Interpreter::_throw_ClassCastException_entry);
4764 
4765   // Get superklass in Rsuper and subklass in Rsub
4766   __ bind(quicked);
4767   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4768 
4769   __ bind(resolved);
4770   __ load_klass(Rsub, Robj);
4771 
4772   // Generate subtype check. Blows both tmps and Rtemp.
4773   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4774   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4775 
4776   // Come here on success
4777 
4778   // Collect counts on whether this check-cast sees NULLs a lot or not.
4779   if (ProfileInterpreter) {
4780     __ b(done);
4781     __ bind(is_null);
4782     __ profile_null_seen(R1_tmp);
4783   } else {
4784     __ bind(is_null);   // same as 'done'
4785   }
4786   __ bind(done);
4787 }
4788 
4789 
4790 void TemplateTable::instanceof() {
4791   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4792   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4793 
4794   transition(atos, itos);
4795   Label done, is_null, not_subtype, quicked, resolved;
4796 
4797   const Register Robj = R0_tos;
4798   const Register Rcpool = R2_tmp;
4799   const Register Rtags = R3_tmp;
4800   const Register Rindex = R4_tmp;
4801   const Register Rsuper = R3_tmp;
4802   const Register Rsub   = R4_tmp;
4803   const Register Rsubtype_check_tmp1 = R0_tmp;
4804   const Register Rsubtype_check_tmp2 = R1_tmp;
4805 
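       // Same shape as checkcast above, except that a failed subtype check produces 0 in
       // R0_tos instead of throwing, and a NULL receiver also yields 0.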
4806   __ cbz(Robj, is_null);
4807 
4808   __ load_klass(Rsub, Robj);
4809 
4810   // Get cpool & tags index
4811   __ get_cpool_and_tags(Rcpool, Rtags);
4812   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4813 
4814   // See if bytecode has already been quicked
4815   __ add(Rtemp, Rtags, Rindex);
4816 #ifdef AARCH64
4817   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4818   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4819   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4820 #else
4821   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4822 #endif // AARCH64
4823   __ cmp(Rtemp, JVM_CONSTANT_Class);
4824 
4825 #ifndef AARCH64
4826   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4827 #endif // !AARCH64
4828 
4829   __ b(quicked, eq);
4830 
4831   __ push(atos);
4832   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4833   // vm_result_2 has metadata result
4834   __ get_vm_result_2(Rsuper, Robj);
4835   __ pop_ptr(Robj);
4836   __ b(resolved);
4837 
4838   // Get superklass in Rsuper and subklass in Rsub
4839   __ bind(quicked);
4840   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4841 
4842   __ bind(resolved);
4843   __ load_klass(Rsub, Robj);
4844 
4845   // Generate subtype check. Blows both tmps and Rtemp.
4846   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4847 
4848   // Come here on success
4849   __ mov(R0_tos, 1);
4850   __ b(done);
4851 
4852   __ bind(not_subtype);
4853   // Come here on failure
4854   __ profile_typecheck_failed(R1_tmp);
4855   __ mov(R0_tos, 0);
4856 
4857   // Collect counts on whether this test sees NULLs a lot or not.
4858   if (ProfileInterpreter) {
4859     __ b(done);
4860     __ bind(is_null);
4861     __ profile_null_seen(R1_tmp);
4862   } else {
4863     __ bind(is_null);   // same as 'done'
4864   }
4865   __ bind(done);
4866 }
4867 
4868 
4869 //----------------------------------------------------------------------------------------------------
4870 // Breakpoints
4871 void TemplateTable::_breakpoint() {
4872 
4873   // Note: We get here even if we are single stepping.
4874   // jbug insists on setting breakpoints at every bytecode
4875   // even if we are in single step mode.
4876 
4877   transition(vtos, vtos);
4878 
4879   // get the unpatched byte code
4880   __ mov(R1, Rmethod);
4881   __ mov(R2, Rbcp);
4882   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
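       // the original bytecode is returned in R0; preserve it in Rtmp_save0 across the
       // breakpoint event call below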
4883 #ifdef AARCH64
4884   __ sxtw(Rtmp_save0, R0);
4885 #else
4886   __ mov(Rtmp_save0, R0);
4887 #endif // AARCH64
4888 
4889   // post the breakpoint event
4890   __ mov(R1, Rmethod);
4891   __ mov(R2, Rbcp);
4892   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4893 
4894   // complete the execution of original bytecode
4895   __ mov(R3_bytecode, Rtmp_save0);
4896   __ dispatch_only_normal(vtos);
4897 }
4898 
4899 
4900 //----------------------------------------------------------------------------------------------------
4901 // Exceptions
4902 
4903 void TemplateTable::athrow() {
4904   transition(atos, vtos);
4905   __ mov(Rexception_obj, R0_tos);
4906   __ null_check(Rexception_obj, Rtemp);
4907   __ b(Interpreter::throw_exception_entry());
4908 }
4909 
4910 
4911 //----------------------------------------------------------------------------------------------------
4912 // Synchronization
4913 //
4914 // Note: monitorenter & exit are symmetric routines; which is reflected
4915 //       in the assembly code structure as well
4916 //
4917 // Stack layout:
4918 //
4919 // [expressions  ] <--- Rstack_top        = expression stack top
4920 // ..
4921 // [expressions  ]
4922 // [monitor entry] <--- monitor block top = expression stack bot
4923 // ..
4924 // [monitor entry]
4925 // [frame data   ] <--- monitor block bot
4926 // ...
4927 // [saved FP     ] <--- FP
4928 
4929 
4930 void TemplateTable::monitorenter() {
4931   transition(atos, vtos);
4932 
4933   const Register Robj = R0_tos;
4934   const Register Rentry = R1_tmp;
4935 
4936   // check for NULL object
4937   __ null_check(Robj, Rtemp);
4938 
4939   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4940   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4941   Label allocate_monitor, allocated;
4942 
4943   // initialize entry pointer
4944   __ mov(Rentry, 0);                             // points to free slot or NULL
4945 
4946   // find a free slot in the monitor block (result in Rentry)
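       // roughly:
       //   for (cur = monitor block top; cur != bottom; cur += entry_size) {
       //     if (cur->obj == NULL) Rentry = cur;     // remember a free slot
       //     if (cur->obj == Robj) break;            // this object already has a monitor here
       //   }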
4947   { Label loop, exit;
4948     const Register Rcur = R2_tmp;
4949     const Register Rcur_obj = Rtemp;
4950     const Register Rbottom = R3_tmp;
4951     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4952 
4953     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4954                                  // points to current entry, starting with top-most entry
4955     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4956                                  // points to word before bottom of monitor block
4957 
4958     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4959 #ifndef AARCH64
4960     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4961                                                  // prefetch monitor's object for the first iteration
4962 #endif // !AARCH64
4963     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4964 
4965     __ bind(loop);
4966 #ifdef AARCH64
4967     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4968 #endif // AARCH64
4969     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4970     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4971 
4972     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4973     __ b(exit, eq);                              // if same object then stop searching
4974 
4975     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4976 
4977     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4978 #ifndef AARCH64
4979     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4980                                                  // prefetch monitor's object for the next iteration
4981 #endif // !AARCH64
4982     __ b(loop, ne);                              // if not at bottom then check this entry
4983     __ bind(exit);
4984   }
4985 
4986   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4987 
4988   __ bind(allocate_monitor);
4989 
4990   // allocate one if there's no free slot
4991   { Label loop;
4992     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4993 
4994     // 1. compute new pointers
4995 
4996 #ifdef AARCH64
4997     __ check_extended_sp(Rtemp);
4998     __ sub(SP, SP, entry_size);                  // adjust extended SP
4999     __ mov(Rtemp, SP);
5000     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
5001 #endif // AARCH64
5002 
5003     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
5004                                                  // old monitor block top / expression stack bottom
5005 
5006     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
5007     __ check_stack_top_on_expansion();
5008 
5009     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
5010 
5011     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
5012 
5013     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
5014                                                  // set new monitor block top
5015 
5016     // 2. move expression stack contents
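         // A new slot is opened just below the existing monitor block (at the expression
         // stack bottom), so every live expression stack word is copied down by entry_size.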
5017 
5018     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
5019 #ifndef AARCH64
5020     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
5021 #endif // !AARCH64
5022     __ b(allocated, eq);
5023 
5024     __ bind(loop);
5025 #ifdef AARCH64
5026     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
5027 #endif // AARCH64
5028     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
5029                                                             // and advance to next word
5030     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
5031 #ifndef AARCH64
5032     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
5033 #endif // !AARCH64
5034     __ b(loop, ne);                                         // if not at bottom then copy next word
5035   }
5036 
5037   // call run-time routine
5038 
5039   // Rentry: points to monitor entry
5040   __ bind(allocated);
5041 
5042   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
5043   // The object has already been popped from the stack, so the expression stack looks correct.
5044   __ add(Rbcp, Rbcp, 1);
5045 
5046   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
5047   __ lock_object(Rentry);
5048 
5049   // check to make sure this monitor doesn't cause stack overflow after locking
5050   __ save_bcp();  // in case of exception
5051   __ arm_stack_overflow_check(0, Rtemp);
5052 
5053   // The bcp has already been incremented. Just need to dispatch to next instruction.
5054   __ dispatch_next(vtos);
5055 }
5056 
5057 
5058 void TemplateTable::monitorexit() {
5059   transition(atos, vtos);
5060 
5061   const Register Robj = R0_tos;
5062   const Register Rcur = R1_tmp;
5063   const Register Rbottom = R2_tmp;
5064   const Register Rcur_obj = Rtemp;
5065 
5066   // check for NULL object
5067   __ null_check(Robj, Rtemp);
5068 
5069   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
5070   Label found, throw_exception;
5071 
5072   // find matching slot
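       // unlike monitorenter there is no free-slot bookkeeping here: only an entry whose
       // obj matches Robj will do, and falling off the bottom means this object was never
       // locked in this frame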
5073   { Label loop;
5074     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
5075 
5076     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
5077                                  // points to current entry, starting with top-most entry
5078     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
5079                                  // points to word before bottom of monitor block
5080 
5081     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5082 #ifndef AARCH64
5083     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5084                                                  // prefetch monitor's object for the first iteration
5085 #endif // !AARCH64
5086     __ b(throw_exception, eq);                   // throw exception if there are no monitors
5087 
5088     __ bind(loop);
5089 #ifdef AARCH64
5090     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
5091 #endif // AARCH64
5092     // check if current entry is for same object
5093     __ cmp(Rcur_obj, Robj);
5094     __ b(found, eq);                             // if same object then stop searching
5095     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
5096     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5097 #ifndef AARCH64
5098     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5099 #endif // !AARCH64
5100     __ b (loop, ne);                             // if not at bottom then check this entry
5101   }
5102 
5103   // error handling. Unlocking was not block-structured
5104   __ bind(throw_exception);
5105   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
5106   __ should_not_reach_here();
5107 
5108   // call run-time routine
5109   // Rcur: points to monitor entry
5110   __ bind(found);
5111   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
5112   __ unlock_object(Rcur);
5113   __ pop_ptr(Robj);                              // discard object
5114 }
5115 
5116 
5117 //----------------------------------------------------------------------------------------------------
5118 // Wide instructions
5119 
5120 void TemplateTable::wide() {
5121   transition(vtos, vtos);
5122   __ ldrb(R3_bytecode, at_bcp(1));
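       // R3_bytecode now holds the bytecode that follows the wide prefix; dispatch through
       // the interpreter's wide entry point table (_wentry_point) indexed by that bytecode.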
5123 
5124   InlinedAddress Ltable((address)Interpreter::_wentry_point);
5125   __ ldr_literal(Rtemp, Ltable);
5126   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
5127 
5128   __ nop(); // to avoid filling CPU pipeline with invalid instructions
5129   __ nop();
5130   __ bind_literal(Ltable);
5131 }
5132 
5133 
5134 //----------------------------------------------------------------------------------------------------
5135 // Multi arrays
5136 
5137 void TemplateTable::multianewarray() {
5138   transition(vtos, atos);
5139   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
5140 
5141   // last dim is on top of stack; we want address of first one:
5142   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
5143   // the latter wordSize to point to the beginning of the array.
5144   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5145   __ sub(R1, Rtemp, wordSize);
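       // R1 = address of the first dimension word; the dimension values themselves stay on
       // the expression stack for the runtime to read, and are popped after the call.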
5146 
5147   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
5148   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5149   // MacroAssembler::StoreStore useless (included in the runtime exit path)
5150 }