1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "interpreter/interp_masm.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/templateTable.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/cpCache.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/synchronizer.hpp"
  40 
  41 #define __ _masm->
  42 
  43 //----------------------------------------------------------------------------------------------------
  44 // Platform-dependent initialization
  45 
  46 void TemplateTable::pd_initialize() {
  47   // No ARM-specific initialization
  48 }
  49 
  50 //----------------------------------------------------------------------------------------------------
  51 // Address computation
  52 
  53 // local variables
  54 static inline Address iaddress(int n)            {
  55   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  56 }
  57 
  58 static inline Address laddress(int n)            { return iaddress(n + 1); }
  59 #ifndef AARCH64
  60 static inline Address haddress(int n)            { return iaddress(n + 0); }
  61 #endif // !AARCH64
  62 
  63 static inline Address faddress(int n)            { return iaddress(n); }
  64 static inline Address daddress(int n)            { return laddress(n); }
  65 static inline Address aaddress(int n)            { return iaddress(n); }
  66 
  67 
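     // Computes the base address of local #index into r.  Locals are addressed
     // downwards from Rlocals, so the base is Rlocals - index * Interpreter::stackElementSize.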
  68 void TemplateTable::get_local_base_addr(Register r, Register index) {
  69   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  70 }
  71 
  72 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  73 #ifdef AARCH64
  74   get_local_base_addr(scratch, index);
  75   return Address(scratch);
  76 #else
  77   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  78 #endif // AARCH64
  79 }
  80 
  81 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  82   return load_iaddress(index, scratch);
  83 }
  84 
  85 Address TemplateTable::load_faddress(Register index, Register scratch) {
  86 #ifdef __SOFTFP__
  87   return load_iaddress(index, scratch);
  88 #else
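       // VFP load/store instructions only support a simple [base, #imm] addressing
       // mode, so compute the local's address into the scratch register first.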
  89   get_local_base_addr(scratch, index);
  90   return Address(scratch);
  91 #endif // __SOFTFP__
  92 }
  93 
  94 Address TemplateTable::load_daddress(Register index, Register scratch) {
  95   get_local_base_addr(scratch, index);
  96   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  97 }
  98 
  99 // Address at the top of the Java expression stack, which may be different
 100 // from SP (it is not for category 1 values).
 101 static inline Address at_tos() {
 102   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 103 }
 104 
 105 static inline Address at_tos_p1() {
 106   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 107 }
 108 
 109 static inline Address at_tos_p2() {
 110   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 111 }
 112 
 113 
 114 // 32-bit ARM:
 115 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 116 // separate ldr instructions (supports nonadjacent values).
 117 // Used for longs in all modes, and for doubles in SOFTFP mode.
 118 //
 119 // AArch64: loads long local into R0_tos.
 120 //
 121 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 122   const Register Rlocal_base = tmp;
 123   assert_different_registers(Rlocal_index, tmp);
 124 
 125   get_local_base_addr(Rlocal_base, Rlocal_index);
 126 #ifdef AARCH64
 127   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 128 #else
 129   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 131 #endif // AARCH64
 132 }
 133 
 134 
 135 // 32-bit ARM:
 136 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 137 // separate str instructions (supports nonadjacent values).
 138 // Used for longs in all modes, and for doubles in SOFTFP mode
 139 //
 140 // AArch64: stores R0_tos to long local.
 141 //
 142 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 143   const Register Rlocal_base = tmp;
 144   assert_different_registers(Rlocal_index, tmp);
 145 
 146   get_local_base_addr(Rlocal_base, Rlocal_index);
 147 #ifdef AARCH64
 148   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 149 #else
 150   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 152 #endif // AARCH64
 153 }
 154 
 155 // Returns address of Java array element using temp register as address base.
 156 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 157   int logElemSize = exact_log2(type2aelembytes(elemType));
 158   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 159   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 160 }
 161 
 162 //----------------------------------------------------------------------------------------------------
 163 // Condition conversion
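     // Returns the assembler condition that is the negation of the given bytecode
     // comparison, e.g. TemplateTable::equal maps to ne.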
 164 AsmCondition convNegCond(TemplateTable::Condition cc) {
 165   switch (cc) {
 166     case TemplateTable::equal        : return ne;
 167     case TemplateTable::not_equal    : return eq;
 168     case TemplateTable::less         : return ge;
 169     case TemplateTable::less_equal   : return gt;
 170     case TemplateTable::greater      : return le;
 171     case TemplateTable::greater_equal: return lt;
 172   }
 173   ShouldNotReachHere();
 174   return nv;
 175 }
 176 
 177 //----------------------------------------------------------------------------------------------------
 178 // Miscellaneous helper routines
 179 
 180 // Store an oop (or NULL) at the address described by obj.
 181 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 182 // Also destroys new_val and obj.base().
 183 static void do_oop_store(InterpreterMacroAssembler* _masm,
 184                          Address obj,
 185                          Register new_val,
 186                          Register tmp1,
 187                          Register tmp2,
 188                          Register tmp3,
 189                          BarrierSet::Name barrier,
 190                          bool precise,
 191                          bool is_null) {
 192 
 193   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 194   switch (barrier) {
 195 #if INCLUDE_ALL_GCS
 196     case BarrierSet::G1BarrierSet:
 197       {
 198         // flatten object address if needed
 199         assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
 200 
 201         const Register store_addr = obj.base();
 202         if (obj.index() != noreg) {
 203           assert (obj.disp() == 0, "index or displacement, not both");
 204 #ifdef AARCH64
 205           __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
 206 #else
 207           assert(obj.offset_op() == add_offset, "addition is expected");
 208           __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
 209 #endif // AARCH64
 210         } else if (obj.disp() != 0) {
 211           __ add(store_addr, obj.base(), obj.disp());
 212         }
 213 
 214         __ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3);
 215         if (is_null) {
 216           __ store_heap_oop_null(new_val, Address(store_addr));
 217         } else {
 218           // G1 barrier needs uncompressed oop for region cross check.
 219           Register val_to_store = new_val;
 220           if (UseCompressedOops) {
 221             val_to_store = tmp1;
 222             __ mov(val_to_store, new_val);
 223           }
 224           __ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store:
 225           val_to_store = noreg;
 226           __ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3);
 227         }
 228       }
 229       break;
 230 #endif // INCLUDE_ALL_GCS
 231     case BarrierSet::CardTableBarrierSet:
 232       {
 233         if (is_null) {
 234           __ store_heap_oop_null(new_val, obj);
 235         } else {
 236           assert (!precise || (obj.index() == noreg && obj.disp() == 0),
 237                   "store check address should be calculated beforehand");
 238 
 239           __ store_check_part1(tmp1);
 240           __ store_heap_oop(new_val, obj); // blows new_val:
 241           new_val = noreg;
 242           __ store_check_part2(obj.base(), tmp1, tmp2);
 243         }
 244       }
 245       break;
 246     case BarrierSet::ModRef:
 247       ShouldNotReachHere();
 248       break;
 249     default:
 250       ShouldNotReachHere();
 251       break;
 252   }
 253 }
 254 
 255 Address TemplateTable::at_bcp(int offset) {
 256   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 257   return Address(Rbcp, offset);
 258 }
 259 
 260 
 261 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 262 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 263                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 264                                    int byte_no) {
 265   assert_different_registers(bc_reg, temp_reg);
 266   if (!RewriteBytecodes)  return;
 267   Label L_patch_done;
 268 
 269   switch (bc) {
 270   case Bytecodes::_fast_aputfield:
 271   case Bytecodes::_fast_bputfield:
 272   case Bytecodes::_fast_zputfield:
 273   case Bytecodes::_fast_cputfield:
 274   case Bytecodes::_fast_dputfield:
 275   case Bytecodes::_fast_fputfield:
 276   case Bytecodes::_fast_iputfield:
 277   case Bytecodes::_fast_lputfield:
 278   case Bytecodes::_fast_sputfield:
 279     {
 280       // We skip bytecode quickening for putfield instructions when
 281       // the put_code written to the constant pool cache is zero.
 282       // This is required so that every execution of this instruction
 283       // calls out to InterpreterRuntime::resolve_get_put to do
 284       // additional, required work.
 285       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 286       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 287       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 288       __ mov(bc_reg, bc);
 289       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 290     }
 291     break;
 292   default:
 293     assert(byte_no == -1, "sanity");
 294     // the pair bytecodes have already done the load.
 295     if (load_bc_into_bc_reg) {
 296       __ mov(bc_reg, bc);
 297     }
 298   }
 299 
 300   if (__ can_post_breakpoint()) {
 301     Label L_fast_patch;
 302     // if a breakpoint is present we can't rewrite the stream directly
 303     __ ldrb(temp_reg, at_bcp(0));
 304     __ cmp(temp_reg, Bytecodes::_breakpoint);
 305     __ b(L_fast_patch, ne);
 306     if (bc_reg != R3) {
 307       __ mov(R3, bc_reg);
 308     }
 309     __ mov(R1, Rmethod);
 310     __ mov(R2, Rbcp);
 311     // Let breakpoint table handling rewrite to quicker bytecode
 312     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 313     __ b(L_patch_done);
 314     __ bind(L_fast_patch);
 315   }
 316 
 317 #ifdef ASSERT
 318   Label L_okay;
 319   __ ldrb(temp_reg, at_bcp(0));
 320   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 321   __ b(L_okay, eq);
 322   __ cmp(temp_reg, bc_reg);
 323   __ b(L_okay, eq);
 324   __ stop("patching the wrong bytecode");
 325   __ bind(L_okay);
 326 #endif
 327 
 328   // patch bytecode
 329   __ strb(bc_reg, at_bcp(0));
 330   __ bind(L_patch_done);
 331 }
 332 
 333 //----------------------------------------------------------------------------------------------------
 334 // Individual instructions
 335 
 336 void TemplateTable::nop() {
 337   transition(vtos, vtos);
 338   // nothing to do
 339 }
 340 
 341 void TemplateTable::shouldnotreachhere() {
 342   transition(vtos, vtos);
 343   __ stop("shouldnotreachhere bytecode");
 344 }
 345 
 346 
 347 
 348 void TemplateTable::aconst_null() {
 349   transition(vtos, atos);
 350   __ mov(R0_tos, 0);
 351 }
 352 
 353 
 354 void TemplateTable::iconst(int value) {
 355   transition(vtos, itos);
 356   __ mov_slow(R0_tos, value);
 357 }
 358 
 359 
 360 void TemplateTable::lconst(int value) {
 361   transition(vtos, ltos);
 362   assert((value == 0) || (value == 1), "unexpected long constant");
 363   __ mov(R0_tos, value);
 364 #ifndef AARCH64
 365   __ mov(R1_tos_hi, 0);
 366 #endif // !AARCH64
 367 }
 368 
 369 
 370 void TemplateTable::fconst(int value) {
 371   transition(vtos, ftos);
 372 #ifdef AARCH64
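       // fmov_s takes the 8-bit AArch64 floating-point immediate encoding:
       // 0x70 encodes 1.0f and 0x00 encodes 2.0f.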
 373   switch(value) {
 374   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 375   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 376   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 377   default:  ShouldNotReachHere();      break;
 378   }
 379 #else
 380   const int zero = 0;         // 0.0f
 381   const int one = 0x3f800000; // 1.0f
 382   const int two = 0x40000000; // 2.0f
 383 
 384   switch(value) {
 385   case 0:   __ mov(R0_tos, zero);   break;
 386   case 1:   __ mov(R0_tos, one);    break;
 387   case 2:   __ mov(R0_tos, two);    break;
 388   default:  ShouldNotReachHere();   break;
 389   }
 390 
 391 #ifndef __SOFTFP__
 392   __ fmsr(S0_tos, R0_tos);
 393 #endif // !__SOFTFP__
 394 #endif // AARCH64
 395 }
 396 
 397 
 398 void TemplateTable::dconst(int value) {
 399   transition(vtos, dtos);
 400 #ifdef AARCH64
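       // 0x70 is the 8-bit floating-point immediate encoding of 1.0.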
 401   switch(value) {
 402   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 403   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 404   default:  ShouldNotReachHere();      break;
 405   }
 406 #else
 407   const int one_lo = 0;            // low part of 1.0
 408   const int one_hi = 0x3ff00000;   // high part of 1.0
 409 
 410   if (value == 0) {
 411 #ifdef __SOFTFP__
 412     __ mov(R0_tos_lo, 0);
 413     __ mov(R1_tos_hi, 0);
 414 #else
 415     __ mov(R0_tmp, 0);
 416     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 417 #endif // __SOFTFP__
 418   } else if (value == 1) {
 419     __ mov(R0_tos_lo, one_lo);
 420     __ mov_slow(R1_tos_hi, one_hi);
 421 #ifndef __SOFTFP__
 422     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 423 #endif // !__SOFTFP__
 424   } else {
 425     ShouldNotReachHere();
 426   }
 427 #endif // AARCH64
 428 }
 429 
 430 
 431 void TemplateTable::bipush() {
 432   transition(vtos, itos);
 433   __ ldrsb(R0_tos, at_bcp(1));
 434 }
 435 
 436 
 437 void TemplateTable::sipush() {
 438   transition(vtos, itos);
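       // Assemble the signed 16-bit operand from its two bytes: the high byte at
       // bcp+1 is sign-extended and shifted left by 8, then or-ed with the low byte.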
 439   __ ldrsb(R0_tmp, at_bcp(1));
 440   __ ldrb(R1_tmp, at_bcp(2));
 441   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 442 }
 443 
 444 
 445 void TemplateTable::ldc(bool wide) {
 446   transition(vtos, vtos);
 447   Label fastCase, Done;
 448 
 449   const Register Rindex = R1_tmp;
 450   const Register Rcpool = R2_tmp;
 451   const Register Rtags  = R3_tmp;
 452   const Register RtagType = R3_tmp;
 453 
 454   if (wide) {
 455     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 456   } else {
 457     __ ldrb(Rindex, at_bcp(1));
 458   }
 459   __ get_cpool_and_tags(Rcpool, Rtags);
 460 
 461   const int base_offset = ConstantPool::header_size() * wordSize;
 462   const int tags_offset = Array<u1>::base_offset_in_bytes();
 463 
 464   // get const type
 465   __ add(Rtemp, Rtags, tags_offset);
 466 #ifdef AARCH64
 467   __ add(Rtemp, Rtemp, Rindex);
 468   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether a control dependency is enough
 469 #else
 470   __ ldrb(RtagType, Address(Rtemp, Rindex));
 471   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 472 #endif // AARCH64
 473 
 474   // unresolved class - get the resolved class
 475   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 476 
 477   // unresolved class in error (resolution failed) - call into runtime
 478   // so that the same error from first resolution attempt is thrown.
 479 #ifdef AARCH64
 480   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 481   __ cond_cmp(RtagType, Rtemp, ne);
 482 #else
 483   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 484 #endif // AARCH64
 485 
 486   // resolved class - need to call vm to get java mirror of the class
 487   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 488 
 489   __ b(fastCase, ne);
 490 
 491   // slow case - call runtime
 492   __ mov(R1, wide);
 493   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 494   __ push(atos);
 495   __ b(Done);
 496 
 497   // int, float, String
 498   __ bind(fastCase);
 499 #ifdef ASSERT
 500   { Label L;
 501     __ cmp(RtagType, JVM_CONSTANT_Integer);
 502     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 503     __ b(L, eq);
 504     __ stop("unexpected tag type in ldc");
 505     __ bind(L);
 506   }
 507 #endif // ASSERT
 508   // itos, ftos
 509   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 510   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 511 
 512   // Floats and ints are placed on the stack in the same way, so we can use
 513   // push(itos) to transfer the float value without going through VFP.
 514   __ push(itos);
 515   __ bind(Done);
 516 }
 517 
 518 // Fast path for caching oop constants.
 519 void TemplateTable::fast_aldc(bool wide) {
 520   transition(vtos, atos);
 521   int index_size = wide ? sizeof(u2) : sizeof(u1);
 522   Label resolved;
 523 
 524   // We are resolved if the resolved reference cache entry contains a
 525   // non-null object (CallSite, etc.)
 526   assert_different_registers(R0_tos, R2_tmp);
 527   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 528   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 529   __ cbnz(R0_tos, resolved);
 530 
 531   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 532 
 533   // first time invocation - must resolve first
 534   __ mov(R1, (int)bytecode());
 535   __ call_VM(R0_tos, entry, R1);
 536   __ bind(resolved);
 537 
 538   if (VerifyOops) {
 539     __ verify_oop(R0_tos);
 540   }
 541 }
 542 
 543 void TemplateTable::ldc2_w() {
 544   transition(vtos, vtos);
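       // Pushes a two-word constant (long or double).  With a hard-float ABI the
       // double case is loaded directly into D0_tos; otherwise both cases go
       // through the integer tos registers.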
 545   const Register Rtags  = R2_tmp;
 546   const Register Rindex = R3_tmp;
 547   const Register Rcpool = R4_tmp;
 548   const Register Rbase  = R5_tmp;
 549 
 550   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 551 
 552   __ get_cpool_and_tags(Rcpool, Rtags);
 553   const int base_offset = ConstantPool::header_size() * wordSize;
 554   const int tags_offset = Array<u1>::base_offset_in_bytes();
 555 
 556   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 557 
 558 #ifdef __ABI_HARD__
 559   Label Long, exit;
 560   // get type from tags
 561   __ add(Rtemp, Rtags, tags_offset);
 562   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 563   __ cmp(Rtemp, JVM_CONSTANT_Double);
 564   __ b(Long, ne);
 565   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 566 
 567   __ push(dtos);
 568   __ b(exit);
 569   __ bind(Long);
 570 #endif
 571 
 572 #ifdef AARCH64
 573   __ ldr(R0_tos, Address(Rbase, base_offset));
 574 #else
 575   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 576   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 577 #endif // AARCH64
 578   __ push(ltos);
 579 
 580 #ifdef __ABI_HARD__
 581   __ bind(exit);
 582 #endif
 583 }
 584 
 585 
 586 void TemplateTable::locals_index(Register reg, int offset) {
 587   __ ldrb(reg, at_bcp(offset));
 588 }
 589 
 590 void TemplateTable::iload() {
 591   iload_internal();
 592 }
 593 
 594 void TemplateTable::nofast_iload() {
 595   iload_internal(may_not_rewrite);
 596 }
 597 
 598 void TemplateTable::iload_internal(RewriteControl rc) {
 599   transition(vtos, itos);
 600 
 601   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 602     Label rewrite, done;
 603     const Register next_bytecode = R1_tmp;
 604     const Register target_bytecode = R2_tmp;
 605 
 606     // get next byte
 607     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 608     // If the next bytecode is _iload, wait to rewrite: we only want to
 609     // rewrite the last two iloads in a pair.  If it is _fast_iload (and
 610     // therefore neither a plain _iload nor a _caload), this iload and the
 611     // next one form an iload pair and can be rewritten to _fast_iload2.
 612     __ cmp(next_bytecode, Bytecodes::_iload);
 613     __ b(done, eq);
 614 
 615     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 616     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 617     __ b(rewrite, eq);
 618 
 619     // if _caload, rewrite to fast_icaload
 620     __ cmp(next_bytecode, Bytecodes::_caload);
 621     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 622     __ b(rewrite, eq);
 623 
 624     // rewrite so iload doesn't check again.
 625     __ mov(target_bytecode, Bytecodes::_fast_iload);
 626 
 627     // rewrite
 628     // R2: fast bytecode
 629     __ bind(rewrite);
 630     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 631     __ bind(done);
 632   }
 633 
 634   // Get the local value into tos
 635   const Register Rlocal_index = R1_tmp;
 636   locals_index(Rlocal_index);
 637   Address local = load_iaddress(Rlocal_index, Rtemp);
 638   __ ldr_s32(R0_tos, local);
 639 }
 640 
 641 
 642 void TemplateTable::fast_iload2() {
 643   transition(vtos, itos);
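       // Load two int locals: the first is pushed onto the expression stack,
       // the second is left in the tos register.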
 644   const Register Rlocal_index = R1_tmp;
 645 
 646   locals_index(Rlocal_index);
 647   Address local = load_iaddress(Rlocal_index, Rtemp);
 648   __ ldr_s32(R0_tos, local);
 649   __ push(itos);
 650 
 651   locals_index(Rlocal_index, 3);
 652   local = load_iaddress(Rlocal_index, Rtemp);
 653   __ ldr_s32(R0_tos, local);
 654 }
 655 
 656 void TemplateTable::fast_iload() {
 657   transition(vtos, itos);
 658   const Register Rlocal_index = R1_tmp;
 659 
 660   locals_index(Rlocal_index);
 661   Address local = load_iaddress(Rlocal_index, Rtemp);
 662   __ ldr_s32(R0_tos, local);
 663 }
 664 
 665 
 666 void TemplateTable::lload() {
 667   transition(vtos, ltos);
 668   const Register Rlocal_index = R2_tmp;
 669 
 670   locals_index(Rlocal_index);
 671   load_category2_local(Rlocal_index, R3_tmp);
 672 }
 673 
 674 
 675 void TemplateTable::fload() {
 676   transition(vtos, ftos);
 677   const Register Rlocal_index = R2_tmp;
 678 
 679   // Get the local value into tos
 680   locals_index(Rlocal_index);
 681   Address local = load_faddress(Rlocal_index, Rtemp);
 682 #ifdef __SOFTFP__
 683   __ ldr(R0_tos, local);
 684 #else
 685   __ ldr_float(S0_tos, local);
 686 #endif // __SOFTFP__
 687 }
 688 
 689 
 690 void TemplateTable::dload() {
 691   transition(vtos, dtos);
 692   const Register Rlocal_index = R2_tmp;
 693 
 694   locals_index(Rlocal_index);
 695 
 696 #ifdef __SOFTFP__
 697   load_category2_local(Rlocal_index, R3_tmp);
 698 #else
 699   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 700 #endif // __SOFTFP__
 701 }
 702 
 703 
 704 void TemplateTable::aload() {
 705   transition(vtos, atos);
 706   const Register Rlocal_index = R1_tmp;
 707 
 708   locals_index(Rlocal_index);
 709   Address local = load_aaddress(Rlocal_index, Rtemp);
 710   __ ldr(R0_tos, local);
 711 }
 712 
 713 
 714 void TemplateTable::locals_index_wide(Register reg) {
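       // Load the 16-bit local variable index of a wide bytecode (big-endian:
       // high byte at bcp+2, low byte at bcp+3).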
 715   assert_different_registers(reg, Rtemp);
 716   __ ldrb(Rtemp, at_bcp(2));
 717   __ ldrb(reg, at_bcp(3));
 718   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 719 }
 720 
 721 
 722 void TemplateTable::wide_iload() {
 723   transition(vtos, itos);
 724   const Register Rlocal_index = R2_tmp;
 725 
 726   locals_index_wide(Rlocal_index);
 727   Address local = load_iaddress(Rlocal_index, Rtemp);
 728   __ ldr_s32(R0_tos, local);
 729 }
 730 
 731 
 732 void TemplateTable::wide_lload() {
 733   transition(vtos, ltos);
 734   const Register Rlocal_index = R2_tmp;
 735   const Register Rlocal_base = R3_tmp;
 736 
 737   locals_index_wide(Rlocal_index);
 738   load_category2_local(Rlocal_index, R3_tmp);
 739 }
 740 
 741 
 742 void TemplateTable::wide_fload() {
 743   transition(vtos, ftos);
 744   const Register Rlocal_index = R2_tmp;
 745 
 746   locals_index_wide(Rlocal_index);
 747   Address local = load_faddress(Rlocal_index, Rtemp);
 748 #ifdef __SOFTFP__
 749   __ ldr(R0_tos, local);
 750 #else
 751   __ ldr_float(S0_tos, local);
 752 #endif // __SOFTFP__
 753 }
 754 
 755 
 756 void TemplateTable::wide_dload() {
 757   transition(vtos, dtos);
 758   const Register Rlocal_index = R2_tmp;
 759 
 760   locals_index_wide(Rlocal_index);
 761 #ifdef __SOFTFP__
 762   load_category2_local(Rlocal_index, R3_tmp);
 763 #else
 764   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 765 #endif // __SOFTFP__
 766 }
 767 
 768 
 769 void TemplateTable::wide_aload() {
 770   transition(vtos, atos);
 771   const Register Rlocal_index = R2_tmp;
 772 
 773   locals_index_wide(Rlocal_index);
 774   Address local = load_aaddress(Rlocal_index, Rtemp);
 775   __ ldr(R0_tos, local);
 776 }
 777 
 778 void TemplateTable::index_check(Register array, Register index) {
 779   // Pop ptr into array
 780   __ pop_ptr(array);
 781   index_check_without_pop(array, index);
 782 }
 783 
 784 void TemplateTable::index_check_without_pop(Register array, Register index) {
 785   assert_different_registers(array, index, Rtemp);
 786   // check array
 787   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 788   // check index
 789   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 790   __ cmp_32(index, Rtemp);
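       // Unsigned comparison: a negative index appears as a large unsigned value,
       // so the 'hs' condition below also catches index < 0.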
 791   if (index != R4_ArrayIndexOutOfBounds_index) {
 792     // convention with generate_ArrayIndexOutOfBounds_handler()
 793     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 794   }
 795   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 796 }
 797 
 798 
 799 void TemplateTable::iaload() {
 800   transition(itos, itos);
 801   const Register Rarray = R1_tmp;
 802   const Register Rindex = R0_tos;
 803 
 804   index_check(Rarray, Rindex);
 805   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 806 }
 807 
 808 
 809 void TemplateTable::laload() {
 810   transition(itos, ltos);
 811   const Register Rarray = R1_tmp;
 812   const Register Rindex = R0_tos;
 813 
 814   index_check(Rarray, Rindex);
 815 
 816 #ifdef AARCH64
 817   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 818 #else
 819   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 820   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 821   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 822 #endif // AARCH64
 823 }
 824 
 825 
 826 void TemplateTable::faload() {
 827   transition(itos, ftos);
 828   const Register Rarray = R1_tmp;
 829   const Register Rindex = R0_tos;
 830 
 831   index_check(Rarray, Rindex);
 832 
 833   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 834 #ifdef __SOFTFP__
 835   __ ldr(R0_tos, addr);
 836 #else
 837   __ ldr_float(S0_tos, addr);
 838 #endif // __SOFTFP__
 839 }
 840 
 841 
 842 void TemplateTable::daload() {
 843   transition(itos, dtos);
 844   const Register Rarray = R1_tmp;
 845   const Register Rindex = R0_tos;
 846 
 847   index_check(Rarray, Rindex);
 848 
 849 #ifdef __SOFTFP__
 850   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 851   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 852   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 853 #else
 854   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 855 #endif // __SOFTFP__
 856 }
 857 
 858 
 859 void TemplateTable::aaload() {
 860   transition(itos, atos);
 861   const Register Rarray = R1_tmp;
 862   const Register Rindex = R0_tos;
 863 
 864   index_check(Rarray, Rindex);
 865   __ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp));
 866 }
 867 
 868 
 869 void TemplateTable::baload() {
 870   transition(itos, itos);
 871   const Register Rarray = R1_tmp;
 872   const Register Rindex = R0_tos;
 873 
 874   index_check(Rarray, Rindex);
 875   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 876 }
 877 
 878 
 879 void TemplateTable::caload() {
 880   transition(itos, itos);
 881   const Register Rarray = R1_tmp;
 882   const Register Rindex = R0_tos;
 883 
 884   index_check(Rarray, Rindex);
 885   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 886 }
 887 
 888 
 889 // iload followed by caload frequent pair
 890 void TemplateTable::fast_icaload() {
 891   transition(vtos, itos);
 892   const Register Rlocal_index = R1_tmp;
 893   const Register Rarray = R1_tmp;
 894   const Register Rindex = R4_tmp; // index_check prefers index on R4
 895   assert_different_registers(Rlocal_index, Rindex);
 896   assert_different_registers(Rarray, Rindex);
 897 
 898   // load index out of locals
 899   locals_index(Rlocal_index);
 900   Address local = load_iaddress(Rlocal_index, Rtemp);
 901   __ ldr_s32(Rindex, local);
 902 
 903   // get array element
 904   index_check(Rarray, Rindex);
 905   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 906 }
 907 
 908 
 909 void TemplateTable::saload() {
 910   transition(itos, itos);
 911   const Register Rarray = R1_tmp;
 912   const Register Rindex = R0_tos;
 913 
 914   index_check(Rarray, Rindex);
 915   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 916 }
 917 
 918 
 919 void TemplateTable::iload(int n) {
 920   transition(vtos, itos);
 921   __ ldr_s32(R0_tos, iaddress(n));
 922 }
 923 
 924 
 925 void TemplateTable::lload(int n) {
 926   transition(vtos, ltos);
 927 #ifdef AARCH64
 928   __ ldr(R0_tos, laddress(n));
 929 #else
 930   __ ldr(R0_tos_lo, laddress(n));
 931   __ ldr(R1_tos_hi, haddress(n));
 932 #endif // AARCH64
 933 }
 934 
 935 
 936 void TemplateTable::fload(int n) {
 937   transition(vtos, ftos);
 938 #ifdef __SOFTFP__
 939   __ ldr(R0_tos, faddress(n));
 940 #else
 941   __ ldr_float(S0_tos, faddress(n));
 942 #endif // __SOFTFP__
 943 }
 944 
 945 
 946 void TemplateTable::dload(int n) {
 947   transition(vtos, dtos);
 948 #ifdef __SOFTFP__
 949   __ ldr(R0_tos_lo, laddress(n));
 950   __ ldr(R1_tos_hi, haddress(n));
 951 #else
 952   __ ldr_double(D0_tos, daddress(n));
 953 #endif // __SOFTFP__
 954 }
 955 
 956 
 957 void TemplateTable::aload(int n) {
 958   transition(vtos, atos);
 959   __ ldr(R0_tos, aaddress(n));
 960 }
 961 
 962 void TemplateTable::aload_0() {
 963   aload_0_internal();
 964 }
 965 
 966 void TemplateTable::nofast_aload_0() {
 967   aload_0_internal(may_not_rewrite);
 968 }
 969 
 970 void TemplateTable::aload_0_internal(RewriteControl rc) {
 971   transition(vtos, atos);
 972   // According to bytecode histograms, the pairs:
 973   //
 974   // _aload_0, _fast_igetfield
 975   // _aload_0, _fast_agetfield
 976   // _aload_0, _fast_fgetfield
 977   //
 978   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 979   // bytecode checks if the next bytecode is either _fast_igetfield,
 980   // _fast_agetfield or _fast_fgetfield and then rewrites the
 981   // current bytecode into a pair bytecode; otherwise it rewrites the current
 982   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 983   //
 984   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 985   //       otherwise we may miss an opportunity for a pair.
 986   //
 987   // Also rewrite frequent pairs
 988   //   aload_0, aload_1
 989   //   aload_0, iload_1
 990   // These bytecodes require only a small amount of code and are the most profitable to rewrite.
 991   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 992     Label rewrite, done;
 993     const Register next_bytecode = R1_tmp;
 994     const Register target_bytecode = R2_tmp;
 995 
 996     // get next byte
 997     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 998 
 999     // if _getfield then wait with rewrite
1000     __ cmp(next_bytecode, Bytecodes::_getfield);
1001     __ b(done, eq);
1002 
1003     // if _igetfield then rewrite to _fast_iaccess_0
1004     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1005     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1006     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1007     __ b(rewrite, eq);
1008 
1009     // if _agetfield then rewrite to _fast_aaccess_0
1010     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1011     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1012     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1013     __ b(rewrite, eq);
1014 
1015     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1016     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1017     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1018 
1019     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1020 #ifdef AARCH64
1021     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1022     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1023     __ mov(target_bytecode, Rtemp, eq);
1024 #else
1025     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1026     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1027 #endif // AARCH64
1028 
1029     // rewrite
1030     __ bind(rewrite);
1031     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1032 
1033     __ bind(done);
1034   }
1035 
1036   aload(0);
1037 }
1038 
1039 void TemplateTable::istore() {
1040   transition(itos, vtos);
1041   const Register Rlocal_index = R2_tmp;
1042 
1043   locals_index(Rlocal_index);
1044   Address local = load_iaddress(Rlocal_index, Rtemp);
1045   __ str_32(R0_tos, local);
1046 }
1047 
1048 
1049 void TemplateTable::lstore() {
1050   transition(ltos, vtos);
1051   const Register Rlocal_index = R2_tmp;
1052 
1053   locals_index(Rlocal_index);
1054   store_category2_local(Rlocal_index, R3_tmp);
1055 }
1056 
1057 
1058 void TemplateTable::fstore() {
1059   transition(ftos, vtos);
1060   const Register Rlocal_index = R2_tmp;
1061 
1062   locals_index(Rlocal_index);
1063   Address local = load_faddress(Rlocal_index, Rtemp);
1064 #ifdef __SOFTFP__
1065   __ str(R0_tos, local);
1066 #else
1067   __ str_float(S0_tos, local);
1068 #endif // __SOFTFP__
1069 }
1070 
1071 
1072 void TemplateTable::dstore() {
1073   transition(dtos, vtos);
1074   const Register Rlocal_index = R2_tmp;
1075 
1076   locals_index(Rlocal_index);
1077 
1078 #ifdef __SOFTFP__
1079   store_category2_local(Rlocal_index, R3_tmp);
1080 #else
1081   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1082 #endif // __SOFTFP__
1083 }
1084 
1085 
1086 void TemplateTable::astore() {
1087   transition(vtos, vtos);
1088   const Register Rlocal_index = R1_tmp;
1089 
1090   __ pop_ptr(R0_tos);
1091   locals_index(Rlocal_index);
1092   Address local = load_aaddress(Rlocal_index, Rtemp);
1093   __ str(R0_tos, local);
1094 }
1095 
1096 
1097 void TemplateTable::wide_istore() {
1098   transition(vtos, vtos);
1099   const Register Rlocal_index = R2_tmp;
1100 
1101   __ pop_i(R0_tos);
1102   locals_index_wide(Rlocal_index);
1103   Address local = load_iaddress(Rlocal_index, Rtemp);
1104   __ str_32(R0_tos, local);
1105 }
1106 
1107 
1108 void TemplateTable::wide_lstore() {
1109   transition(vtos, vtos);
1110   const Register Rlocal_index = R2_tmp;
1111   const Register Rlocal_base = R3_tmp;
1112 
1113 #ifdef AARCH64
1114   __ pop_l(R0_tos);
1115 #else
1116   __ pop_l(R0_tos_lo, R1_tos_hi);
1117 #endif // AARCH64
1118 
1119   locals_index_wide(Rlocal_index);
1120   store_category2_local(Rlocal_index, R3_tmp);
1121 }
1122 
1123 
1124 void TemplateTable::wide_fstore() {
1125   wide_istore();
1126 }
1127 
1128 
1129 void TemplateTable::wide_dstore() {
1130   wide_lstore();
1131 }
1132 
1133 
1134 void TemplateTable::wide_astore() {
1135   transition(vtos, vtos);
1136   const Register Rlocal_index = R2_tmp;
1137 
1138   __ pop_ptr(R0_tos);
1139   locals_index_wide(Rlocal_index);
1140   Address local = load_aaddress(Rlocal_index, Rtemp);
1141   __ str(R0_tos, local);
1142 }
1143 
1144 
1145 void TemplateTable::iastore() {
1146   transition(itos, vtos);
1147   const Register Rindex = R4_tmp; // index_check prefers index in R4
1148   const Register Rarray = R3_tmp;
1149   // R0_tos: value
1150 
1151   __ pop_i(Rindex);
1152   index_check(Rarray, Rindex);
1153   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1154 }
1155 
1156 
1157 void TemplateTable::lastore() {
1158   transition(ltos, vtos);
1159   const Register Rindex = R4_tmp; // index_check prefers index in R4
1160   const Register Rarray = R3_tmp;
1161   // R0_tos_lo:R1_tos_hi: value
1162 
1163   __ pop_i(Rindex);
1164   index_check(Rarray, Rindex);
1165 
1166 #ifdef AARCH64
1167   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1168 #else
1169   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1170   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1171   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1172 #endif // AARCH64
1173 }
1174 
1175 
1176 void TemplateTable::fastore() {
1177   transition(ftos, vtos);
1178   const Register Rindex = R4_tmp; // index_check prefers index in R4
1179   const Register Rarray = R3_tmp;
1180   // S0_tos/R0_tos: value
1181 
1182   __ pop_i(Rindex);
1183   index_check(Rarray, Rindex);
1184   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1185 
1186 #ifdef __SOFTFP__
1187   __ str(R0_tos, addr);
1188 #else
1189   __ str_float(S0_tos, addr);
1190 #endif // __SOFTFP__
1191 }
1192 
1193 
1194 void TemplateTable::dastore() {
1195   transition(dtos, vtos);
1196   const Register Rindex = R4_tmp; // index_check prefers index in R4
1197   const Register Rarray = R3_tmp;
1198   // D0_tos / R0_tos_lo:R1_tos_hi: value
1199 
1200   __ pop_i(Rindex);
1201   index_check(Rarray, Rindex);
1202 
1203 #ifdef __SOFTFP__
1204   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1205   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1206   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1207 #else
1208   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1209 #endif // __SOFTFP__
1210 }
1211 
1212 
1213 void TemplateTable::aastore() {
1214   transition(vtos, vtos);
1215   Label is_null, throw_array_store, done;
1216 
1217   const Register Raddr_1   = R1_tmp;
1218   const Register Rvalue_2  = R2_tmp;
1219   const Register Rarray_3  = R3_tmp;
1220   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1221   const Register Rsub_5    = R5_tmp;
1222   const Register Rsuper_LR = LR_tmp;
1223 
1224   // stack: ..., array, index, value
1225   __ ldr(Rvalue_2, at_tos());     // Value
1226   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1227   __ ldr(Rarray_3, at_tos_p2());  // Array
1228 
1229   index_check_without_pop(Rarray_3, Rindex_4);
1230 
1231   // Compute the array base
1232   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1233 
1234   // do array store check - check for NULL value first
1235   __ cbz(Rvalue_2, is_null);
1236 
1237   // Load subklass
1238   __ load_klass(Rsub_5, Rvalue_2);
1239   // Load superklass
1240   __ load_klass(Rtemp, Rarray_3);
1241   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1242 
1243   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1244   // Come here on success
1245 
1246   // Store value
1247   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1248 
1249   // Now store using the appropriate barrier
1250   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false);
1251   __ b(done);
1252 
1253   __ bind(throw_array_store);
1254 
1255   // Come here on failure of subtype check
1256   __ profile_typecheck_failed(R0_tmp);
1257 
1258   // object is at TOS
1259   __ b(Interpreter::_throw_ArrayStoreException_entry);
1260 
1261   // Have a NULL in Rvalue_2, store NULL at array[index].
1262   __ bind(is_null);
1263   __ profile_null_seen(R0_tmp);
1264 
1265   // Store a NULL
1266   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true);
1267 
1268   // Pop stack arguments
1269   __ bind(done);
1270   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1271 }
1272 
1273 
1274 void TemplateTable::bastore() {
1275   transition(itos, vtos);
1276   const Register Rindex = R4_tmp; // index_check prefers index in R4
1277   const Register Rarray = R3_tmp;
1278   // R0_tos: value
1279 
1280   __ pop_i(Rindex);
1281   index_check(Rarray, Rindex);
1282 
1283   // Need to check whether array is boolean or byte
1284   // since both types share the bastore bytecode.
1285   __ load_klass(Rtemp, Rarray);
1286   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1287   Label L_skip;
1288   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1289   __ b(L_skip, eq);
1290   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1291   __ bind(L_skip);
1292   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1293 }
1294 
1295 
1296 void TemplateTable::castore() {
1297   transition(itos, vtos);
1298   const Register Rindex = R4_tmp; // index_check prefers index in R4
1299   const Register Rarray = R3_tmp;
1300   // R0_tos: value
1301 
1302   __ pop_i(Rindex);
1303   index_check(Rarray, Rindex);
1304 
1305   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1306 }
1307 
1308 
1309 void TemplateTable::sastore() {
1310   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1311            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1312          "base offsets for char and short should be equal");
1313   castore();
1314 }
1315 
1316 
1317 void TemplateTable::istore(int n) {
1318   transition(itos, vtos);
1319   __ str_32(R0_tos, iaddress(n));
1320 }
1321 
1322 
1323 void TemplateTable::lstore(int n) {
1324   transition(ltos, vtos);
1325 #ifdef AARCH64
1326   __ str(R0_tos, laddress(n));
1327 #else
1328   __ str(R0_tos_lo, laddress(n));
1329   __ str(R1_tos_hi, haddress(n));
1330 #endif // AARCH64
1331 }
1332 
1333 
1334 void TemplateTable::fstore(int n) {
1335   transition(ftos, vtos);
1336 #ifdef __SOFTFP__
1337   __ str(R0_tos, faddress(n));
1338 #else
1339   __ str_float(S0_tos, faddress(n));
1340 #endif // __SOFTFP__
1341 }
1342 
1343 
1344 void TemplateTable::dstore(int n) {
1345   transition(dtos, vtos);
1346 #ifdef __SOFTFP__
1347   __ str(R0_tos_lo, laddress(n));
1348   __ str(R1_tos_hi, haddress(n));
1349 #else
1350   __ str_double(D0_tos, daddress(n));
1351 #endif // __SOFTFP__
1352 }
1353 
1354 
1355 void TemplateTable::astore(int n) {
1356   transition(vtos, vtos);
1357   __ pop_ptr(R0_tos);
1358   __ str(R0_tos, aaddress(n));
1359 }
1360 
1361 
1362 void TemplateTable::pop() {
1363   transition(vtos, vtos);
1364   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1365 }
1366 
1367 
1368 void TemplateTable::pop2() {
1369   transition(vtos, vtos);
1370   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1371 }
1372 
1373 
1374 void TemplateTable::dup() {
1375   transition(vtos, vtos);
1376   // stack: ..., a
1377   __ load_ptr(0, R0_tmp);
1378   __ push_ptr(R0_tmp);
1379   // stack: ..., a, a
1380 }
1381 
1382 
1383 void TemplateTable::dup_x1() {
1384   transition(vtos, vtos);
1385   // stack: ..., a, b
1386   __ load_ptr(0, R0_tmp);  // load b
1387   __ load_ptr(1, R2_tmp);  // load a
1388   __ store_ptr(1, R0_tmp); // store b
1389   __ store_ptr(0, R2_tmp); // store a
1390   __ push_ptr(R0_tmp);     // push b
1391   // stack: ..., b, a, b
1392 }
1393 
1394 
1395 void TemplateTable::dup_x2() {
1396   transition(vtos, vtos);
1397   // stack: ..., a, b, c
1398   __ load_ptr(0, R0_tmp);   // load c
1399   __ load_ptr(1, R2_tmp);   // load b
1400   __ load_ptr(2, R4_tmp);   // load a
1401 
1402   __ push_ptr(R0_tmp);      // push c
1403 
1404   // stack: ..., a, b, c, c
1405   __ store_ptr(1, R2_tmp);  // store b
1406   __ store_ptr(2, R4_tmp);  // store a
1407   __ store_ptr(3, R0_tmp);  // store c
1408   // stack: ..., c, a, b, c
1409 }
1410 
1411 
1412 void TemplateTable::dup2() {
1413   transition(vtos, vtos);
1414   // stack: ..., a, b
1415   __ load_ptr(1, R0_tmp);  // load a
1416   __ push_ptr(R0_tmp);     // push a
1417   __ load_ptr(1, R0_tmp);  // load b
1418   __ push_ptr(R0_tmp);     // push b
1419   // stack: ..., a, b, a, b
1420 }
1421 
1422 
1423 void TemplateTable::dup2_x1() {
1424   transition(vtos, vtos);
1425 
1426   // stack: ..., a, b, c
1427   __ load_ptr(0, R4_tmp);  // load c
1428   __ load_ptr(1, R2_tmp);  // load b
1429   __ load_ptr(2, R0_tmp);  // load a
1430 
1431   __ push_ptr(R2_tmp);     // push b
1432   __ push_ptr(R4_tmp);     // push c
1433 
1434   // stack: ..., a, b, c, b, c
1435 
1436   __ store_ptr(2, R0_tmp);  // store a
1437   __ store_ptr(3, R4_tmp);  // store c
1438   __ store_ptr(4, R2_tmp);  // store b
1439 
1440   // stack: ..., b, c, a, b, c
1441 }
1442 
1443 
1444 void TemplateTable::dup2_x2() {
1445   transition(vtos, vtos);
1446   // stack: ..., a, b, c, d
1447   __ load_ptr(0, R0_tmp);  // load d
1448   __ load_ptr(1, R2_tmp);  // load c
1449   __ push_ptr(R2_tmp);     // push c
1450   __ push_ptr(R0_tmp);     // push d
1451   // stack: ..., a, b, c, d, c, d
1452   __ load_ptr(4, R4_tmp);  // load b
1453   __ store_ptr(4, R0_tmp); // store d in b
1454   __ store_ptr(2, R4_tmp); // store b in d
1455   // stack: ..., a, d, c, b, c, d
1456   __ load_ptr(5, R4_tmp);  // load a
1457   __ store_ptr(5, R2_tmp); // store c in a
1458   __ store_ptr(3, R4_tmp); // store a in c
1459   // stack: ..., c, d, a, b, c, d
1460 }
1461 
1462 
1463 void TemplateTable::swap() {
1464   transition(vtos, vtos);
1465   // stack: ..., a, b
1466   __ load_ptr(1, R0_tmp);  // load a
1467   __ load_ptr(0, R2_tmp);  // load b
1468   __ store_ptr(0, R0_tmp); // store a in b
1469   __ store_ptr(1, R2_tmp); // store b in a
1470   // stack: ..., b, a
1471 }
1472 
1473 
1474 void TemplateTable::iop2(Operation op) {
1475   transition(itos, itos);
1476   const Register arg1 = R1_tmp;
1477   const Register arg2 = R0_tos;
1478 
1479   __ pop_i(arg1);
1480   switch (op) {
1481     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1482     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1483     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1484     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1485     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1486     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1487 #ifdef AARCH64
1488     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1489     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1490     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1491 #else
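         // Java int shifts use only the low 5 bits of the shift count, hence the
         // explicit masking with 0x1f (the AArch64 variable shifts do this implicitly).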
1492     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1493     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1494     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1495 #endif // AARCH64
1496     default   : ShouldNotReachHere();
1497   }
1498 }
1499 
1500 
1501 void TemplateTable::lop2(Operation op) {
1502   transition(ltos, ltos);
1503 #ifdef AARCH64
1504   const Register arg1 = R1_tmp;
1505   const Register arg2 = R0_tos;
1506 
1507   __ pop_l(arg1);
1508   switch (op) {
1509     case add  : __ add (R0_tos, arg1, arg2); break;
1510     case sub  : __ sub (R0_tos, arg1, arg2); break;
1511     case _and : __ andr(R0_tos, arg1, arg2); break;
1512     case _or  : __ orr (R0_tos, arg1, arg2); break;
1513     case _xor : __ eor (R0_tos, arg1, arg2); break;
1514     default   : ShouldNotReachHere();
1515   }
1516 #else
1517   const Register arg1_lo = R2_tmp;
1518   const Register arg1_hi = R3_tmp;
1519   const Register arg2_lo = R0_tos_lo;
1520   const Register arg2_hi = R1_tos_hi;
1521 
1522   __ pop_l(arg1_lo, arg1_hi);
1523   switch (op) {
1524     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1525     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1526     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1527     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1528     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1529     default : ShouldNotReachHere();
1530   }
1531 #endif // AARCH64
1532 }
1533 
1534 
1535 void TemplateTable::idiv() {
1536   transition(itos, itos);
1537 #ifdef AARCH64
1538   const Register divisor = R0_tos;
1539   const Register dividend = R1_tmp;
1540 
1541   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1542   __ pop_i(dividend);
1543   __ sdiv_w(R0_tos, dividend, divisor);
1544 #else
1545   __ mov(R2, R0_tos);
1546   __ pop_i(R0);
1547   // R0 - dividend
1548   // R2 - divisor
1549   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1550   // R1 - result
1551   __ mov(R0_tos, R1);
1552 #endif // AARCH64
1553 }
1554 
1555 
1556 void TemplateTable::irem() {
1557   transition(itos, itos);
1558 #ifdef AARCH64
1559   const Register divisor = R0_tos;
1560   const Register dividend = R1_tmp;
1561   const Register quotient = R2_tmp;
1562 
1563   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1564   __ pop_i(dividend);
1565   __ sdiv_w(quotient, dividend, divisor);
1566   __ msub_w(R0_tos, divisor, quotient, dividend);
1567 #else
1568   __ mov(R2, R0_tos);
1569   __ pop_i(R0);
1570   // R0 - dividend
1571   // R2 - divisor
1572   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1573   // R0 - remainder
1574 #endif // AARCH64
1575 }
1576 
1577 
1578 void TemplateTable::lmul() {
1579   transition(ltos, ltos);
1580 #ifdef AARCH64
1581   const Register arg1 = R0_tos;
1582   const Register arg2 = R1_tmp;
1583 
1584   __ pop_l(arg2);
1585   __ mul(R0_tos, arg1, arg2);
1586 #else
1587   const Register arg1_lo = R0_tos_lo;
1588   const Register arg1_hi = R1_tos_hi;
1589   const Register arg2_lo = R2_tmp;
1590   const Register arg2_hi = R3_tmp;
1591 
1592   __ pop_l(arg2_lo, arg2_hi);
1593 
1594   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
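       // The 64-bit product is returned in R0:R1, i.e. already in R0_tos_lo:R1_tos_hi.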
1595 #endif // AARCH64
1596 }
1597 
1598 
1599 void TemplateTable::ldiv() {
1600   transition(ltos, ltos);
1601 #ifdef AARCH64
1602   const Register divisor = R0_tos;
1603   const Register dividend = R1_tmp;
1604 
1605   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1606   __ pop_l(dividend);
1607   __ sdiv(R0_tos, dividend, divisor);
1608 #else
1609   const Register x_lo = R2_tmp;
1610   const Register x_hi = R3_tmp;
1611   const Register y_lo = R0_tos_lo;
1612   const Register y_hi = R1_tos_hi;
1613 
1614   __ pop_l(x_lo, x_hi);
1615 
1616   // check if y = 0
1617   __ orrs(Rtemp, y_lo, y_hi);
1618   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1619   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1620 #endif // AARCH64
1621 }
1622 
1623 
1624 void TemplateTable::lrem() {
1625   transition(ltos, ltos);
1626 #ifdef AARCH64
1627   const Register divisor = R0_tos;
1628   const Register dividend = R1_tmp;
1629   const Register quotient = R2_tmp;
1630 
1631   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1632   __ pop_l(dividend);
1633   __ sdiv(quotient, dividend, divisor);
1634   __ msub(R0_tos, divisor, quotient, dividend);
1635 #else
1636   const Register x_lo = R2_tmp;
1637   const Register x_hi = R3_tmp;
1638   const Register y_lo = R0_tos_lo;
1639   const Register y_hi = R1_tos_hi;
1640 
1641   __ pop_l(x_lo, x_hi);
1642 
1643   // check if y = 0
1644   __ orrs(Rtemp, y_lo, y_hi);
1645   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1646   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1647 #endif // AARCH64
1648 }
1649 
1650 
1651 void TemplateTable::lshl() {
1652   transition(itos, ltos);
1653 #ifdef AARCH64
1654   const Register val = R1_tmp;
1655   const Register shift_cnt = R0_tos;
1656   __ pop_l(val);
1657   __ lslv(R0_tos, val, shift_cnt);
1658 #else
1659   const Register shift_cnt = R4_tmp;
1660   const Register val_lo = R2_tmp;
1661   const Register val_hi = R3_tmp;
1662 
1663   __ pop_l(val_lo, val_hi);
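       // Java long shifts use only the low 6 bits of the shift count.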
1664   __ andr(shift_cnt, R0_tos, 63);
1665   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1666 #endif // AARCH64
1667 }
1668 
1669 
1670 void TemplateTable::lshr() {
1671   transition(itos, ltos);
1672 #ifdef AARCH64
1673   const Register val = R1_tmp;
1674   const Register shift_cnt = R0_tos;
1675   __ pop_l(val);
1676   __ asrv(R0_tos, val, shift_cnt);
1677 #else
1678   const Register shift_cnt = R4_tmp;
1679   const Register val_lo = R2_tmp;
1680   const Register val_hi = R3_tmp;
1681 
1682   __ pop_l(val_lo, val_hi);
1683   __ andr(shift_cnt, R0_tos, 63);
1684   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1685 #endif // AARCH64
1686 }
1687 
1688 
1689 void TemplateTable::lushr() {
1690   transition(itos, ltos);
1691 #ifdef AARCH64
1692   const Register val = R1_tmp;
1693   const Register shift_cnt = R0_tos;
1694   __ pop_l(val);
1695   __ lsrv(R0_tos, val, shift_cnt);
1696 #else
1697   const Register shift_cnt = R4_tmp;
1698   const Register val_lo = R2_tmp;
1699   const Register val_hi = R3_tmp;
1700 
1701   __ pop_l(val_lo, val_hi);
1702   __ andr(shift_cnt, R0_tos, 63);
1703   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1704 #endif // AARCH64
1705 }
1706 
1707 
1708 void TemplateTable::fop2(Operation op) {
1709   transition(ftos, ftos);
1710 #ifdef __SOFTFP__
1711   __ mov(R1, R0_tos);
1712   __ pop_i(R0);
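       // __aeabi_XXXX_glibc: glibc soft-fp routines used for better accuracy
       // (see the corresponding note in dop2 below).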
1713   switch (op) {
1714     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1715     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1716     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1717     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1718     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1719     default : ShouldNotReachHere();
1720   }
1721 #else
1722   const FloatRegister arg1 = S1_tmp;
1723   const FloatRegister arg2 = S0_tos;
1724 
1725   switch (op) {
1726     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1727     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1728     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1729     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1730     case rem:
1731 #ifndef __ABI_HARD__
1732       __ pop_f(arg1);
1733       __ fmrs(R0, arg1);
1734       __ fmrs(R1, arg2);
1735       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1736       __ fmsr(S0_tos, R0);
1737 #else
1738       __ mov_float(S1_reg, arg2);
1739       __ pop_f(S0);
1740       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1741 #endif // !__ABI_HARD__
1742       break;
1743     default : ShouldNotReachHere();
1744   }
1745 #endif // __SOFTFP__
1746 }
1747 
1748 
1749 void TemplateTable::dop2(Operation op) {
1750   transition(dtos, dtos);
1751 #ifdef __SOFTFP__
1752   __ mov(R2, R0_tos_lo);
1753   __ mov(R3, R1_tos_hi);
1754   __ pop_l(R0, R1);
1755   switch (op) {
1756     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1757     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1758     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1759     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1760     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1761     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1762     default : ShouldNotReachHere();
1763   }
1764 #else
1765   const FloatRegister arg1 = D1_tmp;
1766   const FloatRegister arg2 = D0_tos;
1767 
1768   switch (op) {
1769     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1770     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1771     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1772     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1773     case rem:
1774 #ifndef __ABI_HARD__
1775       __ pop_d(arg1);
1776       __ fmrrd(R0, R1, arg1);
1777       __ fmrrd(R2, R3, arg2);
1778       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1779       __ fmdrr(D0_tos, R0, R1);
1780 #else
1781       __ mov_double(D1, arg2);
1782       __ pop_d(D0);
1783       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1784 #endif // !__ABI_HARD__
1785       break;
1786     default : ShouldNotReachHere();
1787   }
1788 #endif // __SOFTFP__
1789 }
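
// The 'rem' cases in fop2/dop2 above always go through the runtime
// (SharedRuntime::frem/drem) because VFP has no remainder instruction. As a rough,
// purely illustrative sketch (drem_sketch is a hypothetical helper, not this file's
// code), the Java floating-point remainder follows fmod-style truncating semantics,
// keeping the sign of the dividend and producing NaN for a zero divisor:
//
//   #include <math.h>
//   double drem_sketch(double x, double y) { return fmod(x, y); }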
1790 
1791 
1792 void TemplateTable::ineg() {
1793   transition(itos, itos);
1794   __ neg_32(R0_tos, R0_tos);
1795 }
1796 
1797 
1798 void TemplateTable::lneg() {
1799   transition(ltos, ltos);
1800 #ifdef AARCH64
1801   __ neg(R0_tos, R0_tos);
1802 #else
1803   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1804   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1805 #endif // AARCH64
1806 }
1807 
1808 
1809 void TemplateTable::fneg() {
1810   transition(ftos, ftos);
1811 #ifdef __SOFTFP__
1812   // Invert sign bit
1813   const int sign_mask = 0x80000000;
1814   __ eor(R0_tos, R0_tos, sign_mask);
1815 #else
1816   __ neg_float(S0_tos, S0_tos);
1817 #endif // __SOFTFP__
1818 }
1819 
1820 
1821 void TemplateTable::dneg() {
1822   transition(dtos, dtos);
1823 #ifdef __SOFTFP__
1824   // Invert sign bit in the high part of the double
1825   const int sign_mask_hi = 0x80000000;
1826   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1827 #else
1828   __ neg_double(D0_tos, D0_tos);
1829 #endif // __SOFTFP__
1830 }
1831 
1832 
1833 void TemplateTable::iinc() {
1834   transition(vtos, vtos);
1835   const Register Rconst = R2_tmp;
1836   const Register Rlocal_index = R1_tmp;
1837   const Register Rval = R0_tmp;
1838 
1839   __ ldrsb(Rconst, at_bcp(2));
1840   locals_index(Rlocal_index);
1841   Address local = load_iaddress(Rlocal_index, Rtemp);
1842   __ ldr_s32(Rval, local);
1843   __ add(Rval, Rval, Rconst);
1844   __ str_32(Rval, local);
1845 }
1846 
1847 
1848 void TemplateTable::wide_iinc() {
1849   transition(vtos, vtos);
1850   const Register Rconst = R2_tmp;
1851   const Register Rlocal_index = R1_tmp;
1852   const Register Rval = R0_tmp;
1853 
1854   // get constant in Rconst
1855   __ ldrsb(R2_tmp, at_bcp(4));
1856   __ ldrb(R3_tmp, at_bcp(5));
1857   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1858 
1859   locals_index_wide(Rlocal_index);
1860   Address local = load_iaddress(Rlocal_index, Rtemp);
1861   __ ldr_s32(Rval, local);
1862   __ add(Rval, Rval, Rconst);
1863   __ str_32(Rval, local);
1864 }
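
// Sketch of the operand decoding done in wide_iinc above (comment only;
// wide_iinc_const_sketch is a hypothetical helper): the increment is a big-endian
// signed 16-bit value at bcp+4/bcp+5, which ldrsb/ldrb plus the orr-with-shift
// reassemble:
//
//   jint wide_iinc_const_sketch(const u1* bcp) {
//     jint hi = (s1)bcp[4];      // sign-extended high byte (ldrsb)
//     jint lo = bcp[5];          // zero-extended low byte  (ldrb)
//     return (hi << 8) | lo;     // orr(Rconst, R3_tmp, R2_tmp lsl 8)
//   }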
1865 
1866 
1867 void TemplateTable::convert() {
1868   // Checking
1869 #ifdef ASSERT
1870   { TosState tos_in  = ilgl;
1871     TosState tos_out = ilgl;
1872     switch (bytecode()) {
1873       case Bytecodes::_i2l: // fall through
1874       case Bytecodes::_i2f: // fall through
1875       case Bytecodes::_i2d: // fall through
1876       case Bytecodes::_i2b: // fall through
1877       case Bytecodes::_i2c: // fall through
1878       case Bytecodes::_i2s: tos_in = itos; break;
1879       case Bytecodes::_l2i: // fall through
1880       case Bytecodes::_l2f: // fall through
1881       case Bytecodes::_l2d: tos_in = ltos; break;
1882       case Bytecodes::_f2i: // fall through
1883       case Bytecodes::_f2l: // fall through
1884       case Bytecodes::_f2d: tos_in = ftos; break;
1885       case Bytecodes::_d2i: // fall through
1886       case Bytecodes::_d2l: // fall through
1887       case Bytecodes::_d2f: tos_in = dtos; break;
1888       default             : ShouldNotReachHere();
1889     }
1890     switch (bytecode()) {
1891       case Bytecodes::_l2i: // fall through
1892       case Bytecodes::_f2i: // fall through
1893       case Bytecodes::_d2i: // fall through
1894       case Bytecodes::_i2b: // fall through
1895       case Bytecodes::_i2c: // fall through
1896       case Bytecodes::_i2s: tos_out = itos; break;
1897       case Bytecodes::_i2l: // fall through
1898       case Bytecodes::_f2l: // fall through
1899       case Bytecodes::_d2l: tos_out = ltos; break;
1900       case Bytecodes::_i2f: // fall through
1901       case Bytecodes::_l2f: // fall through
1902       case Bytecodes::_d2f: tos_out = ftos; break;
1903       case Bytecodes::_i2d: // fall through
1904       case Bytecodes::_l2d: // fall through
1905       case Bytecodes::_f2d: tos_out = dtos; break;
1906       default             : ShouldNotReachHere();
1907     }
1908     transition(tos_in, tos_out);
1909   }
1910 #endif // ASSERT
1911 
1912   // Conversion
1913   switch (bytecode()) {
1914     case Bytecodes::_i2l:
1915 #ifdef AARCH64
1916       __ sign_extend(R0_tos, R0_tos, 32);
1917 #else
1918       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1919 #endif // AARCH64
1920       break;
1921 
1922     case Bytecodes::_i2f:
1923 #ifdef AARCH64
1924       __ scvtf_sw(S0_tos, R0_tos);
1925 #else
1926 #ifdef __SOFTFP__
1927       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1928 #else
1929       __ fmsr(S0_tmp, R0_tos);
1930       __ fsitos(S0_tos, S0_tmp);
1931 #endif // __SOFTFP__
1932 #endif // AARCH64
1933       break;
1934 
1935     case Bytecodes::_i2d:
1936 #ifdef AARCH64
1937       __ scvtf_dw(D0_tos, R0_tos);
1938 #else
1939 #ifdef __SOFTFP__
1940       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1941 #else
1942       __ fmsr(S0_tmp, R0_tos);
1943       __ fsitod(D0_tos, S0_tmp);
1944 #endif // __SOFTFP__
1945 #endif // AARCH64
1946       break;
1947 
1948     case Bytecodes::_i2b:
1949       __ sign_extend(R0_tos, R0_tos, 8);
1950       break;
1951 
1952     case Bytecodes::_i2c:
1953       __ zero_extend(R0_tos, R0_tos, 16);
1954       break;
1955 
1956     case Bytecodes::_i2s:
1957       __ sign_extend(R0_tos, R0_tos, 16);
1958       break;
1959 
1960     case Bytecodes::_l2i:
1961       /* nothing to do */
1962       break;
1963 
1964     case Bytecodes::_l2f:
1965 #ifdef AARCH64
1966       __ scvtf_sx(S0_tos, R0_tos);
1967 #else
1968       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1969 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1970       __ fmsr(S0_tos, R0);
1971 #endif // !__SOFTFP__ && !__ABI_HARD__
1972 #endif // AARCH64
1973       break;
1974 
1975     case Bytecodes::_l2d:
1976 #ifdef AARCH64
1977       __ scvtf_dx(D0_tos, R0_tos);
1978 #else
1979       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1980 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1981       __ fmdrr(D0_tos, R0, R1);
1982 #endif // !__SOFTFP__ && !__ABI_HARD__
1983 #endif // AARCH64
1984       break;
1985 
1986     case Bytecodes::_f2i:
1987 #ifdef AARCH64
1988       __ fcvtzs_ws(R0_tos, S0_tos);
1989 #else
1990 #ifndef __SOFTFP__
1991       __ ftosizs(S0_tos, S0_tos);
1992       __ fmrs(R0_tos, S0_tos);
1993 #else
1994       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1995 #endif // !__SOFTFP__
1996 #endif // AARCH64
1997       break;
1998 
1999     case Bytecodes::_f2l:
2000 #ifdef AARCH64
2001       __ fcvtzs_xs(R0_tos, S0_tos);
2002 #else
2003 #ifndef __SOFTFP__
2004       __ fmrs(R0_tos, S0_tos);
2005 #endif // !__SOFTFP__
2006       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2007 #endif // AARCH64
2008       break;
2009 
2010     case Bytecodes::_f2d:
2011 #ifdef __SOFTFP__
2012       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2013 #else
2014       __ convert_f2d(D0_tos, S0_tos);
2015 #endif // __SOFTFP__
2016       break;
2017 
2018     case Bytecodes::_d2i:
2019 #ifdef AARCH64
2020       __ fcvtzs_wd(R0_tos, D0_tos);
2021 #else
2022 #ifndef __SOFTFP__
2023       __ ftosizd(Stemp, D0);
2024       __ fmrs(R0, Stemp);
2025 #else
2026       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2027 #endif // !__SOFTFP__
2028 #endif // AARCH64
2029       break;
2030 
2031     case Bytecodes::_d2l:
2032 #ifdef AARCH64
2033       __ fcvtzs_xd(R0_tos, D0_tos);
2034 #else
2035 #ifndef __SOFTFP__
2036       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2037 #endif // !__SOFTFP__
2038       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2039 #endif // AARCH64
2040       break;
2041 
2042     case Bytecodes::_d2f:
2043 #ifdef __SOFTFP__
2044       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2045 #else
2046       __ convert_d2f(S0_tos, D0_tos);
2047 #endif // __SOFTFP__
2048       break;
2049 
2050     default:
2051       ShouldNotReachHere();
2052   }
2053 }
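
// Sketch of the int narrowing conversions handled in convert() above (comment only;
// the *_sketch helpers are hypothetical): i2b and i2s sign-extend from 8 and 16
// bits, i2c zero-extends from 16 bits, matching the sign_extend/zero_extend calls:
//
//   jint i2b_sketch(jint x) { return (jbyte)x;  }   // sign_extend(x, 8)
//   jint i2c_sketch(jint x) { return (jchar)x;  }   // zero_extend(x, 16)
//   jint i2s_sketch(jint x) { return (jshort)x; }   // sign_extend(x, 16)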
2054 
2055 
2056 void TemplateTable::lcmp() {
2057   transition(ltos, itos);
2058 #ifdef AARCH64
2059   const Register arg1 = R1_tmp;
2060   const Register arg2 = R0_tos;
2061 
2062   __ pop_l(arg1);
2063 
2064   __ cmp(arg1, arg2);
2065   __ cset(R0_tos, gt);               // 1 if '>', else 0
2066   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2067 #else
2068   const Register arg1_lo = R2_tmp;
2069   const Register arg1_hi = R3_tmp;
2070   const Register arg2_lo = R0_tos_lo;
2071   const Register arg2_hi = R1_tos_hi;
2072   const Register res = R4_tmp;
2073 
2074   __ pop_l(arg1_lo, arg1_hi);
2075 
2076   // long compare arg1 with arg2
2077   // result is -1/0/+1 if '<'/'='/'>'
2078   Label done;
2079 
2080   __ mov (res, 0);
2081   __ cmp (arg1_hi, arg2_hi);
2082   __ mvn (res, 0, lt);
2083   __ mov (res, 1, gt);
2084   __ b(done, ne);
2085   __ cmp (arg1_lo, arg2_lo);
2086   __ mvn (res, 0, lo);
2087   __ mov (res, 1, hi);
2088   __ bind(done);
2089   __ mov (R0_tos, res);
2090 #endif // AARCH64
2091 }
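
// Sketch of the result produced by lcmp above (comment only; lcmp_sketch is a
// hypothetical helper): -1, 0 or +1 is pushed depending on how the two longs
// compare; the 32-bit ARM path compares high words first (signed) and low words
// (unsigned) only when the high words are equal.
//
//   jint lcmp_sketch(jlong arg1, jlong arg2) {
//     return (arg1 < arg2) ? -1 : (arg1 > arg2) ? 1 : 0;
//   }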
2092 
2093 
2094 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2095   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2096 
2097 #ifdef AARCH64
2098   if (is_float) {
2099     transition(ftos, itos);
2100     __ pop_f(S1_tmp);
2101     __ fcmp_s(S1_tmp, S0_tos);
2102   } else {
2103     transition(dtos, itos);
2104     __ pop_d(D1_tmp);
2105     __ fcmp_d(D1_tmp, D0_tos);
2106   }
2107 
2108   if (unordered_result < 0) {
2109     __ cset(R0_tos, gt);               // 1 if '>', else 0
2110     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2111   } else {
2112     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2113     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2114   }
2115 
2116 #else
2117 
2118 #ifdef __SOFTFP__
2119 
2120   if (is_float) {
2121     transition(ftos, itos);
2122     const Register Rx = R0;
2123     const Register Ry = R1;
2124 
2125     __ mov(Ry, R0_tos);
2126     __ pop_i(Rx);
2127 
2128     if (unordered_result == 1) {
2129       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2130     } else {
2131       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2132     }
2133 
2134   } else {
2135 
2136     transition(dtos, itos);
2137     const Register Rx_lo = R0;
2138     const Register Rx_hi = R1;
2139     const Register Ry_lo = R2;
2140     const Register Ry_hi = R3;
2141 
2142     __ mov(Ry_lo, R0_tos_lo);
2143     __ mov(Ry_hi, R1_tos_hi);
2144     __ pop_l(Rx_lo, Rx_hi);
2145 
2146     if (unordered_result == 1) {
2147       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2148     } else {
2149       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2150     }
2151   }
2152 
2153 #else
2154 
2155   if (is_float) {
2156     transition(ftos, itos);
2157     __ pop_f(S1_tmp);
2158     __ fcmps(S1_tmp, S0_tos);
2159   } else {
2160     transition(dtos, itos);
2161     __ pop_d(D1_tmp);
2162     __ fcmpd(D1_tmp, D0_tos);
2163   }
2164 
2165   __ fmstat();
2166 
2167   // comparison result | flag N | flag Z | flag C | flag V
2168   // "<"               |   1    |   0    |   0    |   0
2169   // "=="              |   0    |   1    |   1    |   0
2170   // ">"               |   0    |   0    |   1    |   0
2171   // unordered         |   0    |   0    |   1    |   1
2172 
2173   if (unordered_result < 0) {
2174     __ mov(R0_tos, 1);           // result ==  1 if greater
2175     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2176   } else {
2177     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2178     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2179   }
2180   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2181 #endif // __SOFTFP__
2182 #endif // AARCH64
2183 }
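
// Sketch of the unordered handling above (comment only; fcmp_sketch is a
// hypothetical helper): fcmpl/dcmpl push -1 and fcmpg/dcmpg push +1 when either
// operand is NaN, which is what the two flavours of conditional result selection
// implement.
//
//   jint fcmp_sketch(jfloat x, jfloat y, jint unordered_result) {
//     if (x != x || y != y) return unordered_result;   // NaN => unordered
//     return (x < y) ? -1 : (x > y) ? 1 : 0;
//   }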
2184 
2185 
2186 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2187 
2188   const Register Rdisp = R0_tmp;
2189   const Register Rbumped_taken_count = R5_tmp;
2190 
2191   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2192 
2193   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2194                              InvocationCounter::counter_offset();
2195   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2196                               InvocationCounter::counter_offset();
2197   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2198 
2199   // Load up R0 with the branch displacement
2200   if (is_wide) {
2201     __ ldrsb(R0_tmp, at_bcp(1));
2202     __ ldrb(R1_tmp, at_bcp(2));
2203     __ ldrb(R2_tmp, at_bcp(3));
2204     __ ldrb(R3_tmp, at_bcp(4));
2205     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2206     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2207     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2208   } else {
2209     __ ldrsb(R0_tmp, at_bcp(1));
2210     __ ldrb(R1_tmp, at_bcp(2));
2211     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2212   }
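
  // Sketch of the displacement decoding above (comment only; disp_sketch is a
  // hypothetical helper): the branch offset is a big-endian signed 16-bit value
  // (32-bit for the wide forms goto_w/jsr_w); ldrsb on the first byte provides the
  // sign extension and the orr chain appends the remaining bytes:
  //
  //   jint disp_sketch(const u1* bcp, bool wide) {
  //     jint d = (s1)bcp[1];                       // sign-extended
  //     for (int i = 2; i <= (wide ? 4 : 2); i++) {
  //       d = (d << 8) | bcp[i];
  //     }
  //     return d;
  //   }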
2213 
2214   // Handle all the JSR stuff here, then exit.
2215   // It's much shorter and cleaner than intermingling with the
2216   // non-JSR normal-branch stuff occurring below.
2217   if (is_jsr) {
2218     // compute return address as bci in R1
2219     const Register Rret_addr = R1_tmp;
2220     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2221 
2222     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2223     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2224     __ sub(Rret_addr, Rret_addr, Rtemp);
2225 
2226     // Load the next target bytecode into R3_bytecode and advance Rbcp
2227 #ifdef AARCH64
2228     __ add(Rbcp, Rbcp, Rdisp);
2229     __ ldrb(R3_bytecode, Address(Rbcp));
2230 #else
2231     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2232 #endif // AARCH64
2233 
2234     // Push return address
2235     __ push_i(Rret_addr);
2236     // jsr returns vtos
2237     __ dispatch_only_noverify(vtos);
2238     return;
2239   }
2240 
2241   // Normal (non-jsr) branch handling
2242 
2243   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2244 #ifdef AARCH64
2245   __ add(Rbcp, Rbcp, Rdisp);
2246   __ ldrb(R3_bytecode, Address(Rbcp));
2247 #else
2248   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2249 #endif // AARCH64
2250 
2251   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2252   Label backedge_counter_overflow;
2253   Label profile_method;
2254   Label dispatch;
2255 
2256   if (UseLoopCounter) {
2257     // increment backedge counter for backward branches
2258     // Rdisp (R0): target offset
2259 
2260     const Register Rcnt = R2_tmp;
2261     const Register Rcounters = R1_tmp;
2262 
2263     // count only if backward branch
2264 #ifdef AARCH64
2265     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2266 #else
2267     __ tst(Rdisp, Rdisp);
2268     __ b(dispatch, pl);
2269 #endif // AARCH64
2270 
2271     if (TieredCompilation) {
2272       Label no_mdo;
2273       int increment = InvocationCounter::count_increment;
2274       if (ProfileInterpreter) {
2275         // Are we profiling?
2276         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2277         __ cbz(Rtemp, no_mdo);
2278         // Increment the MDO backedge counter
2279         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2280                                                   in_bytes(InvocationCounter::counter_offset()));
2281         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2282         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2283                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2284         __ b(dispatch);
2285       }
2286       __ bind(no_mdo);
2287       // Increment backedge counter in MethodCounters*
2288       // Note Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on AArch64
2289       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2290                              Rdisp, R3_bytecode,
2291                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2292       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2293       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2294                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2295     } else {
2296       // Increment backedge counter in MethodCounters*
2297       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2298                              Rdisp, R3_bytecode,
2299                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2300       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2301       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2302       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2303 
2304       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2305 #ifdef AARCH64
2306       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2307 #else
2308       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2309 #endif // AARCH64
2310       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2311 
2312       if (ProfileInterpreter) {
2313         // Test to see if we should create a method data oop
2314         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2315         __ ldr_s32(Rtemp, profile_limit);
2316         __ cmp_32(Rcnt, Rtemp);
2317         __ b(dispatch, lt);
2318 
2319         // if no method data exists, go to profile method
2320         __ test_method_data_pointer(R4_tmp, profile_method);
2321 
2322         if (UseOnStackReplacement) {
2323           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2324           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2325           __ ldr_s32(Rtemp, backward_branch_limit);
2326           __ cmp(Rbumped_taken_count, Rtemp);
2327           __ b(dispatch, lo);
2328 
2329           // When ProfileInterpreter is on, the backedge_count comes from the
2330           // MethodData*, whose value does not get reset on the call to
2331           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2332           // routine while the method is being compiled, add a second test to make
2333           // sure the overflow function is called only once every overflow_frequency.
2334           const int overflow_frequency = 1024;
2335 
2336 #ifdef AARCH64
2337           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2338 #else
2339           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2340           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2341           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2342 #endif // AARCH64
2343 
2344           __ b(backedge_counter_overflow, eq);
2345         }
2346       } else {
2347         if (UseOnStackReplacement) {
2348           // check for overflow against Rcnt, which is the sum of the counters
2349           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2350           __ ldr_s32(Rtemp, backward_branch_limit);
2351           __ cmp_32(Rcnt, Rtemp);
2352           __ b(backedge_counter_overflow, hs);
2353 
2354         }
2355       }
2356     }
2357     __ bind(dispatch);
2358   }
2359 
2360   if (!UseOnStackReplacement) {
2361     __ bind(backedge_counter_overflow);
2362   }
2363 
2364   // continue with the bytecode @ target
2365   __ dispatch_only(vtos);
2366 
2367   if (UseLoopCounter) {
2368     if (ProfileInterpreter) {
2369       // Out-of-line code to allocate method data oop.
2370       __ bind(profile_method);
2371 
2372       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2373       __ set_method_data_pointer_for_bcp();
2374       // reload next bytecode
2375       __ ldrb(R3_bytecode, Address(Rbcp));
2376       __ b(dispatch);
2377     }
2378 
2379     if (UseOnStackReplacement) {
2380       // invocation counter overflow
2381       __ bind(backedge_counter_overflow);
2382 
2383       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2384       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2385 
2386       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2387       const Register Rnmethod = R0;
2388 
2389       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2390 
2391       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2392 
2393       // nmethod may have been invalidated (VM may block upon call_VM return)
2394       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2395       __ cmp(R1_tmp, nmethod::in_use);
2396       __ b(dispatch, ne);
2397 
2398       // We have the address of an on-stack replacement routine in Rnmethod.
2399       // We need to prepare to execute the OSR method. First we must
2400       // migrate the locals and monitors off of the stack.
2401 
2402       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2403 
2404       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2405 
2406       // R0 is OSR buffer
2407 
2408       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2409       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2410 
2411 #ifdef AARCH64
2412       __ ldp(FP, LR, Address(FP));
2413       __ mov(SP, Rtemp);
2414 #else
2415       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2416       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2417 #endif // AARCH64
2418 
2419       __ jump(R1_tmp);
2420     }
2421   }
2422 }
2423 
2424 
2425 void TemplateTable::if_0cmp(Condition cc) {
2426   transition(itos, vtos);
2427   // assume branch is more often taken than not (loops use backward branches)
2428   Label not_taken;
2429 #ifdef AARCH64
2430   if (cc == equal) {
2431     __ cbnz_w(R0_tos, not_taken);
2432   } else if (cc == not_equal) {
2433     __ cbz_w(R0_tos, not_taken);
2434   } else {
2435     __ cmp_32(R0_tos, 0);
2436     __ b(not_taken, convNegCond(cc));
2437   }
2438 #else
2439   __ cmp_32(R0_tos, 0);
2440   __ b(not_taken, convNegCond(cc));
2441 #endif // AARCH64
2442   branch(false, false);
2443   __ bind(not_taken);
2444   __ profile_not_taken_branch(R0_tmp);
2445 }
2446 
2447 
2448 void TemplateTable::if_icmp(Condition cc) {
2449   transition(itos, vtos);
2450   // assume branch is more often taken than not (loops use backward branches)
2451   Label not_taken;
2452   __ pop_i(R1_tmp);
2453   __ cmp_32(R1_tmp, R0_tos);
2454   __ b(not_taken, convNegCond(cc));
2455   branch(false, false);
2456   __ bind(not_taken);
2457   __ profile_not_taken_branch(R0_tmp);
2458 }
2459 
2460 
2461 void TemplateTable::if_nullcmp(Condition cc) {
2462   transition(atos, vtos);
2463   assert(cc == equal || cc == not_equal, "invalid condition");
2464 
2465   // assume branch is more often taken than not (loops use backward branches)
2466   Label not_taken;
2467   if (cc == equal) {
2468     __ cbnz(R0_tos, not_taken);
2469   } else {
2470     __ cbz(R0_tos, not_taken);
2471   }
2472   branch(false, false);
2473   __ bind(not_taken);
2474   __ profile_not_taken_branch(R0_tmp);
2475 }
2476 
2477 
2478 void TemplateTable::if_acmp(Condition cc) {
2479   transition(atos, vtos);
2480   // assume branch is more often taken than not (loops use backward branches)
2481   Label not_taken;
2482   __ pop_ptr(R1_tmp);
2483   __ cmp(R1_tmp, R0_tos);
2484   __ b(not_taken, convNegCond(cc));
2485   branch(false, false);
2486   __ bind(not_taken);
2487   __ profile_not_taken_branch(R0_tmp);
2488 }
2489 
2490 
2491 void TemplateTable::ret() {
2492   transition(vtos, vtos);
2493   const Register Rlocal_index = R1_tmp;
2494   const Register Rret_bci = Rtmp_save0; // R4/R19
2495 
2496   locals_index(Rlocal_index);
2497   Address local = load_iaddress(Rlocal_index, Rtemp);
2498   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2499   __ profile_ret(Rtmp_save1, Rret_bci);
2500   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2501   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2502   __ add(Rbcp, Rtemp, Rret_bci);
2503   __ dispatch_next(vtos);
2504 }
2505 
2506 
2507 void TemplateTable::wide_ret() {
2508   transition(vtos, vtos);
2509   const Register Rlocal_index = R1_tmp;
2510   const Register Rret_bci = Rtmp_save0; // R4/R19
2511 
2512   locals_index_wide(Rlocal_index);
2513   Address local = load_iaddress(Rlocal_index, Rtemp);
2514   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2515   __ profile_ret(Rtmp_save1, Rret_bci);
2516   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2517   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2518   __ add(Rbcp, Rtemp, Rret_bci);
2519   __ dispatch_next(vtos);
2520 }
2521 
2522 
2523 void TemplateTable::tableswitch() {
2524   transition(itos, vtos);
2525 
2526   const Register Rindex  = R0_tos;
2527 #ifndef AARCH64
2528   const Register Rtemp2  = R1_tmp;
2529 #endif // !AARCH64
2530   const Register Rabcp   = R2_tmp;  // aligned bcp
2531   const Register Rlow    = R3_tmp;
2532   const Register Rhigh   = R4_tmp;
2533   const Register Roffset = R5_tmp;
2534 
2535   // align bcp
2536   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2537   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2538 
2539   // load lo & hi
2540 #ifdef AARCH64
2541   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2542 #else
2543   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2544 #endif // AARCH64
2545   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2546   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2547 
2548   // compare index with high bound
2549   __ cmp_32(Rhigh, Rindex);
2550 
2551 #ifdef AARCH64
2552   Label default_case, do_dispatch;
2553   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2554   __ b(default_case, lt);
2555 
2556   __ sub_w(Rindex, Rindex, Rlow);
2557   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2558   if(ProfileInterpreter) {
2559     __ sxtw(Rindex, Rindex);
2560     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2561   }
2562   __ b(do_dispatch);
2563 
2564   __ bind(default_case);
2565   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2566   if(ProfileInterpreter) {
2567     __ profile_switch_default(R0_tmp);
2568   }
2569 
2570   __ bind(do_dispatch);
2571 #else
2572 
2573   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2574   __ subs(Rindex, Rindex, Rlow, ge);
2575 
2576   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2577   // ("ge" status accumulated from cmp and subs instructions) then load
2578   // offset from table, otherwise load offset for default case
2579 
2580   if(ProfileInterpreter) {
2581     Label default_case, continue_execution;
2582 
2583     __ b(default_case, lt);
2584     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2585     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2586     __ b(continue_execution);
2587 
2588     __ bind(default_case);
2589     __ profile_switch_default(R0_tmp);
2590     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2591 
2592     __ bind(continue_execution);
2593   } else {
2594     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2595     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2596   }
2597 #endif // AARCH64
2598 
2599   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2600 
2601   // load the next bytecode to R3_bytecode and advance Rbcp
2602 #ifdef AARCH64
2603   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2604   __ ldrb(R3_bytecode, Address(Rbcp));
2605 #else
2606   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2607 #endif // AARCH64
2608   __ dispatch_only(vtos);
2609 
2610 }
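
// Sketch of the tableswitch layout consumed above (comment only;
// tableswitch_sketch is a hypothetical helper): after the 4-byte aligned position
// following the opcode come the big-endian default offset, low and high bounds,
// then (high - low + 1) jump offsets; the index is bounds-checked and used to
// select an offset:
//
//   jint tableswitch_sketch(jint index, jint deflt, jint low, jint high, const jint* offsets) {
//     if (index < low || index > high) return deflt;
//     return offsets[index - low];
//   }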
2611 
2612 
2613 void TemplateTable::lookupswitch() {
2614   transition(itos, itos);
2615   __ stop("lookupswitch bytecode should have been rewritten");
2616 }
2617 
2618 
2619 void TemplateTable::fast_linearswitch() {
2620   transition(itos, vtos);
2621   Label loop, found, default_case, continue_execution;
2622 
2623   const Register Rkey     = R0_tos;
2624   const Register Rabcp    = R2_tmp;  // aligned bcp
2625   const Register Rdefault = R3_tmp;
2626   const Register Rcount   = R4_tmp;
2627   const Register Roffset  = R5_tmp;
2628 
2629   // bswap Rkey, so we can avoid bswapping the table entries
2630   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2631 
2632   // align bcp
2633   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2634   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2635 
2636   // load default & counter
2637 #ifdef AARCH64
2638   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2639 #else
2640   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2641 #endif // AARCH64
2642   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2643 
2644 #ifdef AARCH64
2645   __ cbz_w(Rcount, default_case);
2646 #else
2647   __ cmp_32(Rcount, 0);
2648   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2649   __ b(default_case, eq);
2650 #endif // AARCH64
2651 
2652   // table search
2653   __ bind(loop);
2654 #ifdef AARCH64
2655   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2656 #endif // AARCH64
2657   __ cmp_32(Rtemp, Rkey);
2658   __ b(found, eq);
2659   __ subs(Rcount, Rcount, 1);
2660 #ifndef AARCH64
2661   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2662 #endif // !AARCH64
2663   __ b(loop, ne);
2664 
2665   // default case
2666   __ bind(default_case);
2667   __ profile_switch_default(R0_tmp);
2668   __ mov(Roffset, Rdefault);
2669   __ b(continue_execution);
2670 
2671   // entry found -> get offset
2672   __ bind(found);
2673   // Rabcp is already incremented and points to the next entry
2674   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2675   if (ProfileInterpreter) {
2676     // Calculate index of the selected case.
2677     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2678 
2679     // align bcp
2680     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2681     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2682 
2683     // load number of cases
2684     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2685     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2686 
2687     // Selected index = <number of cases> - <current loop count>
2688     __ sub(R1_tmp, R2_tmp, Rcount);
2689     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2690   }
2691 
2692   // continue execution
2693   __ bind(continue_execution);
2694   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2695 
2696   // load the next bytecode to R3_bytecode and advance Rbcp
2697 #ifdef AARCH64
2698   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2699   __ ldrb(R3_bytecode, Address(Rbcp));
2700 #else
2701   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2702 #endif // AARCH64
2703   __ dispatch_only(vtos);
2704 }
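
// Sketch of fast_linearswitch above (comment only; linearswitch_sketch is a
// hypothetical helper): lookupswitch stores a default offset, a pair count and
// 'count' (match, offset) pairs after the aligned bcp; the key is byte-swapped once
// so the raw big-endian match values can be compared directly in a linear scan:
//
//   jint linearswitch_sketch(jint key, jint deflt, jint count, const jint* pairs) {
//     for (jint i = 0; i < count; i++) {
//       if (pairs[2*i] == key) return pairs[2*i + 1];   // matched -> case offset
//     }
//     return deflt;                                     // no match -> default offset
//   }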
2705 
2706 
2707 void TemplateTable::fast_binaryswitch() {
2708   transition(itos, vtos);
2709   // Implementation using the following core algorithm:
2710   //
2711   // int binary_search(int key, LookupswitchPair* array, int n) {
2712   //   // Binary search according to "Methodik des Programmierens" by
2713   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2714   //   int i = 0;
2715   //   int j = n;
2716   //   while (i+1 < j) {
2717   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2718   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2719   //     // where a stands for the array and assuming that the (nonexistent)
2720   //     // element a[n] is infinitely big.
2721   //     int h = (i + j) >> 1;
2722   //     // i < h < j
2723   //     if (key < array[h].fast_match()) {
2724   //       j = h;
2725   //     } else {
2726   //       i = h;
2727   //     }
2728   //   }
2729   //   // R: a[i] <= key < a[i+1] or Q
2730   //   // (i.e., if key is within array, i is the correct index)
2731   //   return i;
2732   // }
2733 
2734   // register allocation
2735   const Register key    = R0_tos;                // already set (tosca)
2736   const Register array  = R1_tmp;
2737   const Register i      = R2_tmp;
2738   const Register j      = R3_tmp;
2739   const Register h      = R4_tmp;
2740   const Register val    = R5_tmp;
2741   const Register temp1  = Rtemp;
2742   const Register temp2  = LR_tmp;
2743   const Register offset = R3_tmp;
2744 
2745   // set 'array' = aligned bcp + 2 ints
2746   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2747   __ align_reg(array, temp1, BytesPerInt);
2748 
2749   // initialize i & j
2750   __ mov(i, 0);                                  // i = 0;
2751   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2752   // Convert j into native byteordering
2753   __ byteswap_u32(j, temp1, temp2);
2754 
2755   // and start
2756   Label entry;
2757   __ b(entry);
2758 
2759   // binary search loop
2760   { Label loop;
2761     __ bind(loop);
2762     // int h = (i + j) >> 1;
2763     __ add(h, i, j);                             // h = i + j;
2764     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2765     // if (key < array[h].fast_match()) {
2766     //   j = h;
2767     // } else {
2768     //   i = h;
2769     // }
2770 #ifdef AARCH64
2771     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2772     __ ldr_s32(val, Address(temp1));
2773 #else
2774     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2775 #endif // AARCH64
2776     // Convert array[h].match to native byte-ordering before compare
2777     __ byteswap_u32(val, temp1, temp2);
2778     __ cmp_32(key, val);
2779     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2780     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2781     // while (i+1 < j)
2782     __ bind(entry);
2783     __ add(temp1, i, 1);                             // i+1
2784     __ cmp(temp1, j);                                // i+1 < j
2785     __ b(loop, lt);
2786   }
2787 
2788   // end of binary search, result index is i (must check again!)
2789   Label default_case;
2790   // Convert array[i].match to native byte-ordering before compare
2791 #ifdef AARCH64
2792   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2793   __ ldr_s32(val, Address(temp1));
2794 #else
2795   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2796 #endif // AARCH64
2797   __ byteswap_u32(val, temp1, temp2);
2798   __ cmp_32(key, val);
2799   __ b(default_case, ne);
2800 
2801   // entry found
2802   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2803   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2804   __ profile_switch_case(R0, i, R1, i);
2805   __ byteswap_u32(offset, temp1, temp2);
2806 #ifdef AARCH64
2807   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2808   __ ldrb(R3_bytecode, Address(Rbcp));
2809 #else
2810   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2811 #endif // AARCH64
2812   __ dispatch_only(vtos);
2813 
2814   // default case
2815   __ bind(default_case);
2816   __ profile_switch_default(R0);
2817   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2818   __ byteswap_u32(offset, temp1, temp2);
2819 #ifdef AARCH64
2820   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2821   __ ldrb(R3_bytecode, Address(Rbcp));
2822 #else
2823   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2824 #endif // AARCH64
2825   __ dispatch_only(vtos);
2826 }
2827 
2828 
2829 void TemplateTable::_return(TosState state) {
2830   transition(state, state);
2831   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2832 
2833   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2834     Label skip_register_finalizer;
2835     assert(state == vtos, "only valid state");
2836     __ ldr(R1, aaddress(0));
2837     __ load_klass(Rtemp, R1);
2838     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2839     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2840 
2841     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2842 
2843     __ bind(skip_register_finalizer);
2844   }
2845 
2846   // Narrow result if state is itos but result type is smaller.
2847   // Need to narrow in the return bytecode rather than in generate_return_entry
2848   // since compiled code callers expect the result to already be narrowed.
2849   if (state == itos) {
2850     __ narrow(R0_tos);
2851   }
2852   __ remove_activation(state, LR);
2853 
2854   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2855 
2856 #ifndef AARCH64
2857   // According to interpreter calling conventions, result is returned in R0/R1,
2858   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2859   // This conversion should be done after remove_activation, as it uses
2860   // push(state) & pop(state) to preserve return value.
2861   __ convert_tos_to_retval(state);
2862 #endif // !AARCH64
2863 
2864   __ ret();
2865 
2866   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2867   __ nop();
2868 }
2869 
2870 
2871 // ----------------------------------------------------------------------------
2872 // Volatile variables demand their effects be made known to all CPUs in
2873 // order.  Store buffers on most chips allow reads & writes to reorder; the
2874 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2875 // memory barrier (i.e., it's not sufficient that the interpreter does not
2876 // reorder volatile references; the hardware also must not reorder them).
2877 //
2878 // According to the new Java Memory Model (JMM):
2879 // (1) All volatiles are serialized wrt to each other.
2880 // ALSO reads & writes act as acquire & release, so:
2881 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2882 // the read float up to before the read.  It's OK for non-volatile memory refs
2883 // that happen before the volatile read to float down below it.
2884 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2885 // that happen BEFORE the write float down to after the write.  It's OK for
2886 // non-volatile memory refs that happen after the volatile write to float up
2887 // before it.
2888 //
2889 // We only put in barriers around volatile refs (they are expensive), not
2890 // _between_ memory refs (that would require us to track the flavor of the
2891 // previous memory refs).  Requirements (2) and (3) require some barriers
2892 // before volatile stores and after volatile loads.  These nearly cover
2893 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2894 // case is placed after volatile-stores although it could just as well go
2895 // before volatile-loads.
2896 // TODO-AARCH64: consider removing extra unused parameters
2897 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2898                                      Register tmp,
2899                                      bool preserve_flags,
2900                                      Register load_tgt) {
2901 #ifdef AARCH64
2902   __ membar(order_constraint);
2903 #else
2904   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2905 #endif
2906 }
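
// Rough sketch of how the interpreter places these barriers around volatile field
// accesses (comment only, simplified; see getfield_or_static below and
// putfield_or_static later in this file):
//
//   // volatile load  (acquire):  val = field;
//   //                            volatile_barrier(LoadLoad | LoadStore, ...);
//   // volatile store (release):  volatile_barrier(StoreStore | LoadStore, ...);
//   //                            field = val;
//   //                            volatile_barrier(StoreLoad, ...);   // store-load case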
2907 
2908 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2909 void TemplateTable::resolve_cache_and_index(int byte_no,
2910                                             Register Rcache,
2911                                             Register Rindex,
2912                                             size_t index_size) {
2913   assert_different_registers(Rcache, Rindex, Rtemp);
2914 
2915   Label resolved;
2916   Bytecodes::Code code = bytecode();
2917   switch (code) {
2918   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2919   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2920   }
2921 
2922   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2923   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2924   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2925   __ b(resolved, eq);
2926 
2927   // resolve first time through
2928   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2929   __ mov(R1, code);
2930   __ call_VM(noreg, entry, R1);
2931   // Update registers with resolved info
2932   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2933   __ bind(resolved);
2934 }
2935 
2936 
2937 // The Rcache and Rindex registers must be set before the call
2938 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2939                                               Register Rindex,
2940                                               Register Roffset,
2941                                               Register Rflags,
2942                                               Register Robj,
2943                                               bool is_static = false) {
2944 
2945   assert_different_registers(Rcache, Rindex, Rtemp);
2946   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2947 
2948   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2949 
2950   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2951 
2952   // Field offset
2953   __ ldr(Roffset, Address(Rtemp,
2954            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2955 
2956   // Flags
2957   __ ldr_u32(Rflags, Address(Rtemp,
2958            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2959 
2960   if (is_static) {
2961     __ ldr(Robj, Address(Rtemp,
2962              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2963     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2964     __ ldr(Robj, Address(Robj, mirror_offset));
2965     __ resolve_oop_handle(Robj);
2966   }
2967 }
2968 
2969 
2970 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2971 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2972                                                Register method,
2973                                                Register itable_index,
2974                                                Register flags,
2975                                                bool is_invokevirtual,
2976                                                bool is_invokevfinal/*unused*/,
2977                                                bool is_invokedynamic) {
2978   // setup registers
2979   const Register cache = R2_tmp;
2980   const Register index = R3_tmp;
2981   const Register temp_reg = Rtemp;
2982   assert_different_registers(cache, index, temp_reg);
2983   assert_different_registers(method, itable_index, temp_reg);
2984 
2985   // determine constant pool cache field offsets
2986   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2987   const int method_offset = in_bytes(
2988     ConstantPoolCache::base_offset() +
2989       ((byte_no == f2_byte)
2990        ? ConstantPoolCacheEntry::f2_offset()
2991        : ConstantPoolCacheEntry::f1_offset()
2992       )
2993     );
2994   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2995                                     ConstantPoolCacheEntry::flags_offset());
2996   // access constant pool cache fields
2997   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2998                                     ConstantPoolCacheEntry::f2_offset());
2999 
3000   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3001   resolve_cache_and_index(byte_no, cache, index, index_size);
3002     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3003     __ ldr(method, Address(temp_reg, method_offset));
3004 
3005   if (itable_index != noreg) {
3006     __ ldr(itable_index, Address(temp_reg, index_offset));
3007   }
3008   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3009 }
3010 
3011 
3012 // The registers cache and index are expected to be set before the call, and should not be Rtemp.
3013 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3014 // except cache and index registers which are preserved.
3015 void TemplateTable::jvmti_post_field_access(Register Rcache,
3016                                             Register Rindex,
3017                                             bool is_static,
3018                                             bool has_tos) {
3019   assert_different_registers(Rcache, Rindex, Rtemp);
3020 
3021   if (__ can_post_field_access()) {
3022     // Check to see if a field access watch has been set before we take
3023     // the time to call into the VM.
3024 
3025     Label Lcontinue;
3026 
3027     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3028     __ cbz(Rtemp, Lcontinue);
3029 
3030     // cache entry pointer
3031     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3032     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3033     if (is_static) {
3034       __ mov(R1, 0);        // NULL object reference
3035     } else {
3036       __ pop(atos);         // Get the object
3037       __ mov(R1, R0_tos);
3038       __ verify_oop(R1);
3039       __ push(atos);        // Restore stack state
3040     }
3041     // R1: object pointer or NULL
3042     // R2: cache entry pointer
3043     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3044                R1, R2);
3045     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3046 
3047     __ bind(Lcontinue);
3048   }
3049 }
3050 
3051 
3052 void TemplateTable::pop_and_check_object(Register r) {
3053   __ pop_ptr(r);
3054   __ null_check(r, Rtemp);  // for field access must check obj.
3055   __ verify_oop(r);
3056 }
3057 
3058 
3059 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3060   transition(vtos, vtos);
3061 
3062   const Register Roffset  = R2_tmp;
3063   const Register Robj     = R3_tmp;
3064   const Register Rcache   = R4_tmp;
3065   const Register Rflagsav = Rtmp_save0;  // R4/R19
3066   const Register Rindex   = R5_tmp;
3067   const Register Rflags   = R5_tmp;
3068 
3069   const bool gen_volatile_check = os::is_MP();
3070 
3071   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3072   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3073   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3074 
3075   if (gen_volatile_check) {
3076     __ mov(Rflagsav, Rflags);
3077   }
3078 
3079   if (!is_static) pop_and_check_object(Robj);
3080 
3081   Label Done, Lint, Ltable, shouldNotReachHere;
3082   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3083 
3084   // compute type
3085   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3086   // Make sure we don't need to mask flags after the above shift
3087   ConstantPoolCacheEntry::verify_tos_state_shift();
3088 
3089   // There are two alternative implementations of getfield/getstatic:
3090   //
3091   // 32-bit ARM:
3092   // 1) Table switch using add(PC,...) instruction (fast_version)
3093   // 2) Table switch using ldr(PC,...) instruction
3094   //
3095   // AArch64:
3096   // 1) Table switch using adr/add/br instructions (fast_version)
3097   // 2) Table switch using adr/ldr/br instructions
3098   //
3099   // The first version requires a fixed-size code block for each case and
3100   // cannot be used when the RewriteBytecodes or VerifyOops
3101   // modes are enabled.
3102 
3103   // Size of fixed size code block for fast_version
3104   const int log_max_block_size = 2;
3105   const int max_block_size = 1 << log_max_block_size;
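
  // Rough sketch of the fast table switch set up below (comment only; target_sketch
  // is a hypothetical helper, not generated code): each tos case is padded to
  // max_block_size instructions, so dispatch just scales the tos state by the block
  // size and adds it to the first case block:
  //
  //   address target_sketch(address first_case_block, int tos_state) {
  //     return first_case_block +
  //            (tos_state << (log_max_block_size + Assembler::LogInstructionSize));
  //   }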
3106 
3107   // Decide if fast version is enabled
3108   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3109 
3110   // On 32-bit ARM the atos and itos cases can be merged only in the fast version, because
3111   // atos requires additional processing in the slow version.
3112   // On AArch64 atos and itos cannot be merged.
3113   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3114 
3115   assert(number_of_states == 10, "number of tos states should be equal to 10");
3116 
3117   __ cmp(Rflags, itos);
3118 #ifdef AARCH64
3119   __ b(Lint, eq);
3120 
3121   if(fast_version) {
3122     __ adr(Rtemp, Lbtos);
3123     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3124     __ br(Rtemp);
3125   } else {
3126     __ adr(Rtemp, Ltable);
3127     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3128     __ br(Rtemp);
3129   }
3130 #else
3131   if(atos_merged_with_itos) {
3132     __ cmp(Rflags, atos, ne);
3133   }
3134 
3135   // table switch by type
3136   if(fast_version) {
3137     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3138   } else {
3139     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3140   }
3141 
3142   // jump to itos/atos case
3143   __ b(Lint);
3144 #endif // AARCH64
3145 
3146   // table with addresses for slow version
3147   if (fast_version) {
3148     // nothing to do
3149   } else  {
3150     AARCH64_ONLY(__ align(wordSize));
3151     __ bind(Ltable);
3152     __ emit_address(Lbtos);
3153     __ emit_address(Lztos);
3154     __ emit_address(Lctos);
3155     __ emit_address(Lstos);
3156     __ emit_address(Litos);
3157     __ emit_address(Lltos);
3158     __ emit_address(Lftos);
3159     __ emit_address(Ldtos);
3160     __ emit_address(Latos);
3161   }
3162 
3163 #ifdef ASSERT
3164   int seq = 0;
3165 #endif
3166   // btos
3167   {
3168     assert(btos == seq++, "btos has unexpected value");
3169     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3170     __ bind(Lbtos);
3171     __ ldrsb(R0_tos, Address(Robj, Roffset));
3172     __ push(btos);
3173     // Rewrite bytecode to be faster
3174     if (!is_static && rc == may_rewrite) {
3175       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3176     }
3177     __ b(Done);
3178   }
3179 
3180   // ztos (same as btos for getfield)
3181   {
3182     assert(ztos == seq++, "ztos has unexpected value");
3183     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3184     __ bind(Lztos);
3185     __ ldrsb(R0_tos, Address(Robj, Roffset));
3186     __ push(ztos);
3187     // Rewrite bytecode to be faster (use btos fast getfield)
3188     if (!is_static && rc == may_rewrite) {
3189       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3190     }
3191     __ b(Done);
3192   }
3193 
3194   // ctos
3195   {
3196     assert(ctos == seq++, "ctos has unexpected value");
3197     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3198     __ bind(Lctos);
3199     __ ldrh(R0_tos, Address(Robj, Roffset));
3200     __ push(ctos);
3201     if (!is_static && rc == may_rewrite) {
3202       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3203     }
3204     __ b(Done);
3205   }
3206 
3207   // stos
3208   {
3209     assert(stos == seq++, "stos has unexpected value");
3210     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3211     __ bind(Lstos);
3212     __ ldrsh(R0_tos, Address(Robj, Roffset));
3213     __ push(stos);
3214     if (!is_static && rc == may_rewrite) {
3215       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3216     }
3217     __ b(Done);
3218   }
3219 
3220   // itos
3221   {
3222     assert(itos == seq++, "itos has unexpected value");
3223     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3224     __ bind(Litos);
3225     __ b(shouldNotReachHere);
3226   }
3227 
3228   // ltos
3229   {
3230     assert(ltos == seq++, "ltos has unexpected value");
3231     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3232     __ bind(Lltos);
3233 #ifdef AARCH64
3234     __ ldr(R0_tos, Address(Robj, Roffset));
3235 #else
3236     __ add(Roffset, Robj, Roffset);
3237     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3238 #endif // AARCH64
3239     __ push(ltos);
3240     if (!is_static && rc == may_rewrite) {
3241       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3242     }
3243     __ b(Done);
3244   }
3245 
3246   // ftos
3247   {
3248     assert(ftos == seq++, "ftos has unexpected value");
3249     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3250     __ bind(Lftos);
3251     // floats and ints are placed on the stack in the same way, so
3252     // we can use push(itos) to transfer the value without using VFP
3253     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3254     __ push(itos);
3255     if (!is_static && rc == may_rewrite) {
3256       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3257     }
3258     __ b(Done);
3259   }
3260 
3261   // dtos
3262   {
3263     assert(dtos == seq++, "dtos has unexpected value");
3264     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3265     __ bind(Ldtos);
3266     // doubles and longs are placed on stack in the same way, so
3267     // we can use push(ltos) to transfer value without using VFP
3268 #ifdef AARCH64
3269     __ ldr(R0_tos, Address(Robj, Roffset));
3270 #else
3271     __ add(Rtemp, Robj, Roffset);
3272     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3273 #endif // AARCH64
3274     __ push(ltos);
3275     if (!is_static && rc == may_rewrite) {
3276       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3277     }
3278     __ b(Done);
3279   }
3280 
3281   // atos
3282   {
3283     assert(atos == seq++, "atos has unexpected value");
3284 
3285     // atos case for AArch64 and slow version on 32-bit ARM
3286     if(!atos_merged_with_itos) {
3287       __ bind(Latos);
3288       __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3289       __ push(atos);
3290       // Rewrite bytecode to be faster
3291       if (!is_static && rc == may_rewrite) {
3292         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3293       }
3294       __ b(Done);
3295     }
3296   }
3297 
3298   assert(vtos == seq++, "vtos has unexpected value");
3299 
3300   __ bind(shouldNotReachHere);
3301   __ should_not_reach_here();
3302 
3303   // itos and atos cases are frequent, so it makes sense to move them out of the table switch
3304   // atos case can be merged with the itos case (and thus moved out of the table switch) on 32-bit ARM, fast version only
3305 
3306   __ bind(Lint);
3307   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3308   __ push(itos);
3309   // Rewrite bytecode to be faster
3310   if (!is_static && rc == may_rewrite) {
3311     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3312   }
3313 
3314   __ bind(Done);
3315 
3316   if (gen_volatile_check) {
3317     // Check for volatile field
3318     Label notVolatile;
3319     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3320 
3321     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3322 
3323     __ bind(notVolatile);
3324   }
3325 
3326 }
3327 
3328 void TemplateTable::getfield(int byte_no) {
3329   getfield_or_static(byte_no, false);
3330 }
3331 
3332 void TemplateTable::nofast_getfield(int byte_no) {
3333   getfield_or_static(byte_no, false, may_not_rewrite);
3334 }
3335 
3336 void TemplateTable::getstatic(int byte_no) {
3337   getfield_or_static(byte_no, true);
3338 }
3339 
3340 
3341 // The cache and index registers are expected to be set before the call, and should not be R1 or Rtemp.
3342 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3343 // except cache and index registers which are preserved.
3344 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3345   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3346   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3347 
3348   if (__ can_post_field_modification()) {
3349     // Check to see if a field modification watch has been set before we take
3350     // the time to call into the VM.
3351     Label Lcontinue;
3352 
3353     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3354     __ cbz(Rtemp, Lcontinue);
3355 
3356     if (is_static) {
3357       // Life is simple.  Null out the object pointer.
3358       __ mov(R1, 0);
3359     } else {
3360       // Life is harder. The stack holds the value on top, followed by the object.
3361       // We don't know the size of the value, though; it could be one or two words
3362       // depending on its type. As a result, we must find the type to determine where
3363       // the object is.
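           // For illustration only (not generated code): with the value on top of the
           // expression stack and the object reference below it, e.g. for `obj.l = 1L`
           // (a two-word value) the object is at Interpreter::expr_offset_in_bytes(2),
           // while for `obj.i = 1` (a one-word value) it is at Interpreter::expr_offset_in_bytes(1).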
3364 
3365       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3366       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3367 
3368       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3369       // Make sure we don't need to mask Rtemp after the above shift
3370       ConstantPoolCacheEntry::verify_tos_state_shift();
3371 
3372       __ cmp(Rtemp, ltos);
3373       __ cond_cmp(Rtemp, dtos, ne);
3374 #ifdef AARCH64
3375       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3376       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3377       __ mov(R1, Rtemp, eq);
3378       __ ldr(R1, Address(Rstack_top, R1));
3379 #else
3380       // two word value (ltos/dtos)
3381       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3382 
3383       // one word value (not ltos, dtos)
3384       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3385 #endif // AARCH64
3386     }
3387 
3388     // cache entry pointer
3389     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3390     __ add(R2, R2, in_bytes(cp_base_offset));
3391 
3392     // object (tos)
3393     __ mov(R3, Rstack_top);
3394 
3395     // R1: object pointer set up above (NULL if static)
3396     // R2: cache entry pointer
3397     // R3: value object on the stack
3398     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3399                R1, R2, R3);
3400     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3401 
3402     __ bind(Lcontinue);
3403   }
3404 }
3405 
3406 
3407 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3408   transition(vtos, vtos);
3409 
3410   const Register Roffset  = R2_tmp;
3411   const Register Robj     = R3_tmp;
3412   const Register Rcache   = R4_tmp;
3413   const Register Rflagsav = Rtmp_save0;  // R4/R19
3414   const Register Rindex   = R5_tmp;
3415   const Register Rflags   = R5_tmp;
3416 
3417   const bool gen_volatile_check = os::is_MP();
3418 
3419   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3420   jvmti_post_field_mod(Rcache, Rindex, is_static);
3421   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3422 
3423   if (gen_volatile_check) {
3424     // Check for volatile field
3425     Label notVolatile;
3426     __ mov(Rflagsav, Rflags);
3427     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3428 
3429     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3430 
3431     __ bind(notVolatile);
3432   }
3433 
3434   Label Done, Lint, shouldNotReachHere;
3435   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3436 
3437   // compute type
3438   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3439   // Make sure we don't need to mask flags after the above shift
3440   ConstantPoolCacheEntry::verify_tos_state_shift();
3441 
3442   // There are actually two versions of implementation of putfield/putstatic:
3443   //
3444   // 32-bit ARM:
3445   // 1) Table switch using add(PC,...) instruction (fast_version)
3446   // 2) Table switch using ldr(PC,...) instruction
3447   //
3448   // AArch64:
3449   // 1) Table switch using adr/add/br instructions (fast_version)
3450   // 2) Table switch using adr/ldr/br instructions
3451   //
3452   // The first version requires a fixed-size code block for each case
3453   // and cannot be used when RewriteBytecodes or VerifyOops
3454   // is enabled.
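       // For illustration only (not generated here): on 32-bit ARM the fast version
       // dispatches with a single
       //   add PC, PC, Rflags, lsl #(log_max_block_size + LogInstructionSize)
       // jumping into an array of fixed-size code blocks indexed by the tos state,
       // while the slow version loads the target address from the Ltable word table below.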
3455 
3456   // Size of fixed size code block for fast_version (in instructions)
3457   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3458   const int max_block_size = 1 << log_max_block_size;
3459 
3460   // Decide if fast version is enabled
3461   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3462 
3463   assert(number_of_states == 10, "number of tos states should be equal to 10");
3464 
3465   // itos case is frequent and is moved outside the table switch
3466   __ cmp(Rflags, itos);
3467 
3468 #ifdef AARCH64
3469   __ b(Lint, eq);
3470 
3471   if (fast_version) {
3472     __ adr(Rtemp, Lbtos);
3473     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3474     __ br(Rtemp);
3475   } else {
3476     __ adr(Rtemp, Ltable);
3477     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3478     __ br(Rtemp);
3479   }
3480 #else
3481   // table switch by type
3482   if (fast_version) {
3483     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3484   } else {
3485     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3486   }
3487 
3488   // jump to itos case
3489   __ b(Lint);
3490 #endif // AARCH64
3491 
3492   // table with addresses for slow version
3493   if (fast_version) {
3494     // nothing to do
3495   } else {
3496     AARCH64_ONLY(__ align(wordSize));
3497     __ bind(Ltable);
3498     __ emit_address(Lbtos);
3499     __ emit_address(Lztos);
3500     __ emit_address(Lctos);
3501     __ emit_address(Lstos);
3502     __ emit_address(Litos);
3503     __ emit_address(Lltos);
3504     __ emit_address(Lftos);
3505     __ emit_address(Ldtos);
3506     __ emit_address(Latos);
3507   }
3508 
3509 #ifdef ASSERT
3510   int seq = 0;
3511 #endif
3512   // btos
3513   {
3514     assert(btos == seq++, "btos has unexpected value");
3515     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3516     __ bind(Lbtos);
3517     __ pop(btos);
3518     if (!is_static) pop_and_check_object(Robj);
3519     __ strb(R0_tos, Address(Robj, Roffset));
3520     if (!is_static && rc == may_rewrite) {
3521       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3522     }
3523     __ b(Done);
3524   }
3525 
3526   // ztos
3527   {
3528     assert(ztos == seq++, "ztos has unexpected value");
3529     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3530     __ bind(Lztos);
3531     __ pop(ztos);
3532     if (!is_static) pop_and_check_object(Robj);
3533     __ and_32(R0_tos, R0_tos, 1);
3534     __ strb(R0_tos, Address(Robj, Roffset));
3535     if (!is_static && rc == may_rewrite) {
3536       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3537     }
3538     __ b(Done);
3539   }
3540 
3541   // ctos
3542   {
3543     assert(ctos == seq++, "ctos has unexpected value");
3544     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3545     __ bind(Lctos);
3546     __ pop(ctos);
3547     if (!is_static) pop_and_check_object(Robj);
3548     __ strh(R0_tos, Address(Robj, Roffset));
3549     if (!is_static && rc == may_rewrite) {
3550       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3551     }
3552     __ b(Done);
3553   }
3554 
3555   // stos
3556   {
3557     assert(stos == seq++, "stos has unexpected value");
3558     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3559     __ bind(Lstos);
3560     __ pop(stos);
3561     if (!is_static) pop_and_check_object(Robj);
3562     __ strh(R0_tos, Address(Robj, Roffset));
3563     if (!is_static && rc == may_rewrite) {
3564       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3565     }
3566     __ b(Done);
3567   }
3568 
3569   // itos
3570   {
3571     assert(itos == seq++, "itos has unexpected value");
3572     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3573     __ bind(Litos);
3574     __ b(shouldNotReachHere);
3575   }
3576 
3577   // ltos
3578   {
3579     assert(ltos == seq++, "ltos has unexpected value");
3580     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3581     __ bind(Lltos);
3582     __ pop(ltos);
3583     if (!is_static) pop_and_check_object(Robj);
3584 #ifdef AARCH64
3585     __ str(R0_tos, Address(Robj, Roffset));
3586 #else
3587     __ add(Roffset, Robj, Roffset);
3588     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3589 #endif // AARCH64
3590     if (!is_static && rc == may_rewrite) {
3591       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3592     }
3593     __ b(Done);
3594   }
3595 
3596   // ftos
3597   {
3598     assert(ftos == seq++, "ftos has unexpected value");
3599     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3600     __ bind(Lftos);
3601     // floats and ints are placed on stack in the same way, so
3602     // we can use pop(itos) to transfer value without using VFP
3603     __ pop(itos);
3604     if (!is_static) pop_and_check_object(Robj);
3605     __ str_32(R0_tos, Address(Robj, Roffset));
3606     if (!is_static && rc == may_rewrite) {
3607       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3608     }
3609     __ b(Done);
3610   }
3611 
3612   // dtos
3613   {
3614     assert(dtos == seq++, "dtos has unexpected value");
3615     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3616     __ bind(Ldtos);
3617     // doubles and longs are placed on stack in the same way, so
3618     // we can use pop(ltos) to transfer value without using VFP
3619     __ pop(ltos);
3620     if (!is_static) pop_and_check_object(Robj);
3621 #ifdef AARCH64
3622     __ str(R0_tos, Address(Robj, Roffset));
3623 #else
3624     __ add(Rtemp, Robj, Roffset);
3625     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3626 #endif // AARCH64
3627     if (!is_static && rc == may_rewrite) {
3628       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3629     }
3630     __ b(Done);
3631   }
3632 
3633   // atos
3634   {
3635     assert(atos == seq++, "atos has unexpected value");
3636     __ bind(Latos);
3637     __ pop(atos);
3638     if (!is_static) pop_and_check_object(Robj);
3639     // Store into the field
3640     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false);
3641     if (!is_static && rc == may_rewrite) {
3642       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3643     }
3644     __ b(Done);
3645   }
3646 
3647   __ bind(shouldNotReachHere);
3648   __ should_not_reach_here();
3649 
3650   // itos case is frequent and is moved outside the table switch
3651   __ bind(Lint);
3652   __ pop(itos);
3653   if (!is_static) pop_and_check_object(Robj);
3654   __ str_32(R0_tos, Address(Robj, Roffset));
3655   if (!is_static && rc == may_rewrite) {
3656     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3657   }
3658 
3659   __ bind(Done);
3660 
3661   if (gen_volatile_check) {
3662     Label notVolatile;
3663     if (is_static) {
3664       // Just check for volatile. Memory barrier for static final field
3665       // is handled by class initialization.
3666       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3667       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3668       __ bind(notVolatile);
3669     } else {
3670       // Check for volatile field and final field
3671       Label skipMembar;
3672 
3673       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3674                        1 << ConstantPoolCacheEntry::is_final_shift);
3675       __ b(skipMembar, eq);
3676 
3677       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3678 
3679       // StoreLoad barrier after volatile field write
3680       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3681       __ b(skipMembar);
3682 
3683       // StoreStore barrier after final field write
3684       __ bind(notVolatile);
3685       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
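           // (Illustrative note: the StoreStore barrier keeps final field stores ordered
           // before any subsequent publication of the object reference.)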
3686 
3687       __ bind(skipMembar);
3688     }
3689   }
3690 
3691 }
3692 
3693 void TemplateTable::putfield(int byte_no) {
3694   putfield_or_static(byte_no, false);
3695 }
3696 
3697 void TemplateTable::nofast_putfield(int byte_no) {
3698   putfield_or_static(byte_no, false, may_not_rewrite);
3699 }
3700 
3701 void TemplateTable::putstatic(int byte_no) {
3702   putfield_or_static(byte_no, true);
3703 }
3704 
3705 
3706 void TemplateTable::jvmti_post_fast_field_mod() {
3707   // This version of jvmti_post_fast_field_mod() is not used on ARM
3708   Unimplemented();
3709 }
3710 
3711 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3712 // but preserves tosca with the given state.
3713 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3714   if (__ can_post_field_modification()) {
3715     // Check to see if a field modification watch has been set before we take
3716     // the time to call into the VM.
3717     Label done;
3718 
3719     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3720     __ cbz(R2, done);
3721 
3722     __ pop_ptr(R3);               // copy the object pointer from tos
3723     __ verify_oop(R3);
3724     __ push_ptr(R3);              // put the object pointer back on tos
3725 
3726     __ push(state);               // save value on the stack
3727 
3728     // access constant pool cache entry
3729     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3730 
3731     __ mov(R1, R3);
3732     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3733     __ mov(R3, Rstack_top); // put tos addr into R3
3734 
3735     // R1: object pointer copied above
3736     // R2: cache entry pointer
3737     // R3: jvalue object on the stack
3738     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3739 
3740     __ pop(state);                // restore value
3741 
3742     __ bind(done);
3743   }
3744 }
3745 
3746 
3747 void TemplateTable::fast_storefield(TosState state) {
3748   transition(state, vtos);
3749 
3750   ByteSize base = ConstantPoolCache::base_offset();
3751 
3752   jvmti_post_fast_field_mod(state);
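       // Roughly: fast_storefield handles the _fast_*putfield bytecodes installed by
       // patch_bytecode() in putfield_or_static(); the cp cache entry is already resolved,
       // so only the field offset and flags need to be loaded before storing the tos value.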
3753 
3754   const Register Rcache  = R2_tmp;
3755   const Register Rindex  = R3_tmp;
3756   const Register Roffset = R3_tmp;
3757   const Register Rflags  = Rtmp_save0; // R4/R19
3758   const Register Robj    = R5_tmp;
3759 
3760   const bool gen_volatile_check = os::is_MP();
3761 
3762   // access constant pool cache
3763   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3764 
3765   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3766 
3767   if (gen_volatile_check) {
3768     // load flags to test volatile
3769     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3770   }
3771 
3772   // replace index with field offset from cache entry
3773   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3774 
3775   if (gen_volatile_check) {
3776     // Check for volatile store
3777     Label notVolatile;
3778     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3779 
3780     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3781     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3782 
3783     __ bind(notVolatile);
3784   }
3785 
3786   // Get object from stack
3787   pop_and_check_object(Robj);
3788 
3789   // access field
3790   switch (bytecode()) {
3791     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3792                                      // fall through
3793     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3794     case Bytecodes::_fast_sputfield: // fall through
3795     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3796     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3797 #ifdef AARCH64
3798     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3799     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3800     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3801 #else
3802     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3803                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3804 
3805 #ifdef __SOFTFP__
3806     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3807     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3808                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3809 #else
3810     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3811                                      __ fsts(S0_tos, Address(Robj));          break;
3812     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3813                                      __ fstd(D0_tos, Address(Robj));          break;
3814 #endif // __SOFTFP__
3815 #endif // AARCH64
3816 
3817     case Bytecodes::_fast_aputfield:
3818       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false);
3819       break;
3820 
3821     default:
3822       ShouldNotReachHere();
3823   }
3824 
3825   if (gen_volatile_check) {
3826     Label notVolatile;
3827     Label skipMembar;
3828     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3829                    1 << ConstantPoolCacheEntry::is_final_shift);
3830     __ b(skipMembar, eq);
3831 
3832     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3833 
3834     // StoreLoad barrier after volatile field write
3835     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3836     __ b(skipMembar);
3837 
3838     // StoreStore barrier after final field write
3839     __ bind(notVolatile);
3840     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3841 
3842     __ bind(skipMembar);
3843   }
3844 }
3845 
3846 
3847 void TemplateTable::fast_accessfield(TosState state) {
3848   transition(atos, state);
3849 
3850   // do the JVMTI work here to avoid disturbing the register state below
3851   if (__ can_post_field_access()) {
3852     // Check to see if a field access watch has been set before we take
3853     // the time to call into the VM.
3854     Label done;
3855     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3856     __ cbz(R2, done);
3857     // access constant pool cache entry
3858     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3859     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3860     __ verify_oop(R0_tos);
3861     __ mov(R1, R0_tos);
3862     // R1: object pointer copied above
3863     // R2: cache entry pointer
3864     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3865     __ pop_ptr(R0_tos);   // restore object pointer
3866 
3867     __ bind(done);
3868   }
3869 
3870   const Register Robj    = R0_tos;
3871   const Register Rcache  = R2_tmp;
3872   const Register Rflags  = R2_tmp;
3873   const Register Rindex  = R3_tmp;
3874   const Register Roffset = R3_tmp;
3875 
3876   const bool gen_volatile_check = os::is_MP();
3877 
3878   // access constant pool cache
3879   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3880   // replace index with field offset from cache entry
3881   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3882   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3883 
3884   if (gen_volatile_check) {
3885     // load flags to test volatile
3886     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3887   }
3888 
3889   __ verify_oop(Robj);
3890   __ null_check(Robj, Rtemp);
3891 
3892   // access field
3893   switch (bytecode()) {
3894     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3895     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3896     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3897     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3898 #ifdef AARCH64
3899     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3900     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3901     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3902 #else
3903     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3904                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3905 #ifdef __SOFTFP__
3906     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3907     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3908                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3909 #else
3910     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3911     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3912 #endif // __SOFTFP__
3913 #endif // AARCH64
3914     case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3915     default:
3916       ShouldNotReachHere();
3917   }
3918 
3919   if (gen_volatile_check) {
3920     // Check for volatile load
3921     Label notVolatile;
3922     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3923 
3924     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3925     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3926 
3927     __ bind(notVolatile);
3928   }
3929 }
3930 
3931 
3932 void TemplateTable::fast_xaccess(TosState state) {
3933   transition(vtos, state);
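       // Roughly: fast_xaccess implements the _fast_{i,a,f}access_0 bytecodes, i.e. an
       // aload_0 fused with the following fast getfield; the receiver is local 0 and the
       // field's cp cache entry is referenced at bcp + 2 (see get_cache_and_index_at_bcp below).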
3934 
3935   const Register Robj = R1_tmp;
3936   const Register Rcache = R2_tmp;
3937   const Register Rindex = R3_tmp;
3938   const Register Roffset = R3_tmp;
3939   const Register Rflags = R4_tmp;
3940   Label done;
3941 
3942   // get receiver
3943   __ ldr(Robj, aaddress(0));
3944 
3945   // access constant pool cache
3946   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3947   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3948   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3949 
3950   const bool gen_volatile_check = os::is_MP();
3951 
3952   if (gen_volatile_check) {
3953     // load flags to test volatile
3954     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3955   }
3956 
3957   // make sure exception is reported in correct bcp range (getfield is next instruction)
3958   __ add(Rbcp, Rbcp, 1);
3959   __ null_check(Robj, Rtemp);
3960   __ sub(Rbcp, Rbcp, 1);
3961 
3962 #ifdef AARCH64
3963   if (gen_volatile_check) {
3964     Label notVolatile;
3965     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3966 
3967     __ add(Rtemp, Robj, Roffset);
3968 
3969     if (state == itos) {
3970       __ ldar_w(R0_tos, Rtemp);
3971     } else if (state == atos) {
3972       if (UseCompressedOops) {
3973         __ ldar_w(R0_tos, Rtemp);
3974         __ decode_heap_oop(R0_tos);
3975       } else {
3976         __ ldar(R0_tos, Rtemp);
3977       }
3978       __ verify_oop(R0_tos);
3979     } else if (state == ftos) {
3980       __ ldar_w(R0_tos, Rtemp);
3981       __ fmov_sw(S0_tos, R0_tos);
3982     } else {
3983       ShouldNotReachHere();
3984     }
3985     __ b(done);
3986 
3987     __ bind(notVolatile);
3988   }
3989 #endif // AARCH64
3990 
3991   if (state == itos) {
3992     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3993   } else if (state == atos) {
3994     __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3995     __ verify_oop(R0_tos);
3996   } else if (state == ftos) {
3997 #ifdef AARCH64
3998     __ ldr_s(S0_tos, Address(Robj, Roffset));
3999 #else
4000 #ifdef __SOFTFP__
4001     __ ldr(R0_tos, Address(Robj, Roffset));
4002 #else
4003     __ add(Roffset, Robj, Roffset);
4004     __ flds(S0_tos, Address(Roffset));
4005 #endif // __SOFTFP__
4006 #endif // AARCH64
4007   } else {
4008     ShouldNotReachHere();
4009   }
4010 
4011 #ifndef AARCH64
4012   if (gen_volatile_check) {
4013     // Check for volatile load
4014     Label notVolatile;
4015     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4016 
4017     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4018 
4019     __ bind(notVolatile);
4020   }
4021 #endif // !AARCH64
4022 
4023   __ bind(done);
4024 }
4025 
4026 
4027 
4028 //----------------------------------------------------------------------------------------------------
4029 // Calls
4030 
4031 void TemplateTable::count_calls(Register method, Register temp) {
4032   // implemented elsewhere
4033   ShouldNotReachHere();
4034 }
4035 
4036 
4037 void TemplateTable::prepare_invoke(int byte_no,
4038                                    Register method,  // linked method (or i-klass)
4039                                    Register index,   // itable index, MethodType, etc.
4040                                    Register recv,    // if caller wants to see it
4041                                    Register flags    // if caller wants to test it
4042                                    ) {
4043   // determine flags
4044   const Bytecodes::Code code = bytecode();
4045   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4046   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4047   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4048   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4049   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4050   const bool load_receiver       = (recv != noreg);
4051   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4052   assert(recv  == noreg || recv  == R2, "");
4053   assert(flags == noreg || flags == R3, "");
4054 
4055   // setup registers & access constant pool cache
4056   if (recv  == noreg)  recv  = R2;
4057   if (flags == noreg)  flags = R3;
4058   const Register temp = Rtemp;
4059   const Register ret_type = R1_tmp;
4060   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4061 
4062   // save 'interpreter return address'
4063   __ save_bcp();
4064 
4065   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4066 
4067   // maybe push extra argument
4068   if (is_invokedynamic || is_invokehandle) {
4069     Label L_no_push;
4070     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4071     __ mov(temp, index);
4072     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4073     __ load_resolved_reference_at_index(index, temp);
4074     __ verify_oop(index);
4075     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4076     __ bind(L_no_push);
4077   }
4078 
4079   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4080   if (load_receiver) {
4081     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4082     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4083     __ ldr(recv, recv_addr);
4084     __ verify_oop(recv);
4085   }
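       // For example (illustrative only): for `obj.m(a, b)` the parameter size is 3
       // (receiver plus two arguments), so the receiver is found two stack slots
       // below the top of the expression stack.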
4086 
4087   // compute return type
4088   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4089   // Make sure we don't need to mask flags after the above shift
4090   ConstantPoolCacheEntry::verify_tos_state_shift();
4091   // load return address
4092   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4093     __ mov_slow(temp, table);
4094     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4095   }
4096 }
4097 
4098 
4099 void TemplateTable::invokevirtual_helper(Register index,
4100                                          Register recv,
4101                                          Register flags) {
4102 
4103   const Register recv_klass = R2_tmp;
4104 
4105   assert_different_registers(index, recv, flags, Rtemp);
4106   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4107 
4108   // Test for an invoke of a final method
4109   Label notFinal;
4110   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4111 
4112   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4113 
4114   // do the call - the index is actually the method to call
4115 
4116   // It's final, need a null check here!
4117   __ null_check(recv, Rtemp);
4118 
4119   // profile this call
4120   __ profile_final_call(R0_tmp);
4121 
4122   __ jump_from_interpreted(Rmethod);
4123 
4124   __ bind(notFinal);
4125 
4126   // get receiver klass
4127   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4128   __ load_klass(recv_klass, recv);
4129 
4130   // profile this call
4131   __ profile_virtual_call(R0_tmp, recv_klass);
4132 
4133   // get target Method* & entry point
4134   const int base = in_bytes(Klass::vtable_start_offset());
4135   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
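       // Roughly (illustrative): Rmethod = *(Method**)(recv_klass + vtable_start_offset + index * wordSize)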
4136   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4137   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4138   __ jump_from_interpreted(Rmethod);
4139 }
4140 
4141 void TemplateTable::invokevirtual(int byte_no) {
4142   transition(vtos, vtos);
4143   assert(byte_no == f2_byte, "use this argument");
4144 
4145   const Register Rrecv  = R2_tmp;
4146   const Register Rflags = R3_tmp;
4147 
4148   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4149 
4150   // Rmethod: index
4151   // Rrecv:   receiver
4152   // Rflags:  flags
4153   // LR:      return address
4154 
4155   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4156 }
4157 
4158 
4159 void TemplateTable::invokespecial(int byte_no) {
4160   transition(vtos, vtos);
4161   assert(byte_no == f1_byte, "use this argument");
4162   const Register Rrecv  = R2_tmp;
4163   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4164   __ verify_oop(Rrecv);
4165   __ null_check(Rrecv, Rtemp);
4166   // do the call
4167   __ profile_call(Rrecv);
4168   __ jump_from_interpreted(Rmethod);
4169 }
4170 
4171 
4172 void TemplateTable::invokestatic(int byte_no) {
4173   transition(vtos, vtos);
4174   assert(byte_no == f1_byte, "use this argument");
4175   prepare_invoke(byte_no, Rmethod);
4176   // do the call
4177   __ profile_call(R2_tmp);
4178   __ jump_from_interpreted(Rmethod);
4179 }
4180 
4181 
4182 void TemplateTable::fast_invokevfinal(int byte_no) {
4183   transition(vtos, vtos);
4184   assert(byte_no == f2_byte, "use this argument");
4185   __ stop("fast_invokevfinal is not used on ARM");
4186 }
4187 
4188 
4189 void TemplateTable::invokeinterface(int byte_no) {
4190   transition(vtos, vtos);
4191   assert(byte_no == f1_byte, "use this argument");
4192 
4193   const Register Ritable = R1_tmp;
4194   const Register Rrecv   = R2_tmp;
4195   const Register Rinterf = R5_tmp;
4196   const Register Rindex  = R4_tmp;
4197   const Register Rflags  = R3_tmp;
4198   const Register Rklass  = R3_tmp;
4199 
4200   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4201 
4202   // Special case of invokeinterface called for virtual method of
4203   // java.lang.Object.  See cpCacheOop.cpp for details.
4204   // This code isn't produced by javac, but could be produced by
4205   // another compliant Java compiler.
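       // For example (illustrative only), an `invokeinterface java/lang/Object.hashCode()I`
       // instruction is dispatched here as a regular virtual call.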
4206   Label notMethod;
4207   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4208 
4209   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4210   __ bind(notMethod);
4211 
4212   // Get receiver klass into Rklass - also a null check
4213   __ load_klass(Rklass, Rrecv);
4214 
4215   Label no_such_interface;
4216 
4217   // Receiver subtype check against REFC.
4218   __ lookup_interface_method(// inputs: rec. class, interface
4219                              Rklass, Rinterf, noreg,
4220                              // outputs:  scan temp. reg1, scan temp. reg2
4221                              noreg, Ritable, Rtemp,
4222                              no_such_interface);
4223 
4224   // profile this call
4225   __ profile_virtual_call(R0_tmp, Rklass);
4226 
4227   // Get declaring interface class from method
4228   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4229   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4230   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4231 
4232   // Get itable index from method
4233   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4234   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
4235   __ neg(Rindex, Rtemp);
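       // Together, the two instructions above compute (roughly)
       //   Rindex = Method::itable_index_max - <value loaded from Method::itable_index_offset()>
       // which recovers the itable index passed to lookup_interface_method() below.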
4236 
4237   __ lookup_interface_method(// inputs: rec. class, interface
4238                              Rklass, Rinterf, Rindex,
4239                              // outputs:  scan temp. reg1, scan temp. reg2
4240                              Rmethod, Ritable, Rtemp,
4241                              no_such_interface);
4242 
4243   // Rmethod: Method* to call
4244 
4245   // Check for abstract method error
4246   // Note: This should be done more efficiently via a throw_abstract_method_error
4247   //       interpreter entry point and a conditional jump to it in case of a null
4248   //       method.
4249   { Label L;
4250     __ cbnz(Rmethod, L);
4251     // throw exception
4252     // note: must restore interpreter registers to canonical
4253     //       state for exception handling to work correctly!
4254     __ restore_method();
4255     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4256     // the call_VM checks for exception, so we should never return here.
4257     __ should_not_reach_here();
4258     __ bind(L);
4259   }
4260 
4261   // do the call
4262   __ jump_from_interpreted(Rmethod);
4263 
4264   // throw exception
4265   __ bind(no_such_interface);
4266   __ restore_method();
4267   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4268   // the call_VM checks for exception, so we should never return here.
4269   __ should_not_reach_here();
4270 }
4271 
4272 void TemplateTable::invokehandle(int byte_no) {
4273   transition(vtos, vtos);
4274 
4275   // TODO-AARCH64 review register usage
4276   const Register Rrecv  = R2_tmp;
4277   const Register Rmtype = R4_tmp;
4278   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4279 
4280   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4281   __ null_check(Rrecv, Rtemp);
4282 
4283   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4284   // Rmethod: MH.invokeExact_MT method (from f2)
4285 
4286   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4287 
4288   // do the call
4289   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4290   __ mov(Rmethod, R5_method);
4291   __ jump_from_interpreted(Rmethod);
4292 }
4293 
4294 void TemplateTable::invokedynamic(int byte_no) {
4295   transition(vtos, vtos);
4296 
4297   // TODO-AARCH64 review register usage
4298   const Register Rcallsite = R4_tmp;
4299   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4300 
4301   prepare_invoke(byte_no, R5_method, Rcallsite);
4302 
4303   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4304   // Rmethod:   MH.linkToCallSite method (from f2)
4305 
4306   // Note:  Rcallsite is already pushed by prepare_invoke
4307 
4308   if (ProfileInterpreter) {
4309     __ profile_call(R2_tmp);
4310   }
4311 
4312   // do the call
4313   __ mov(Rmethod, R5_method);
4314   __ jump_from_interpreted(Rmethod);
4315 }
4316 
4317 //----------------------------------------------------------------------------------------------------
4318 // Allocation
4319 
4320 void TemplateTable::_new() {
4321   transition(vtos, atos);
4322 
4323   const Register Robj   = R0_tos;
4324   const Register Rcpool = R1_tmp;
4325   const Register Rindex = R2_tmp;
4326   const Register Rtags  = R3_tmp;
4327   const Register Rsize  = R3_tmp;
4328 
4329   Register Rklass = R4_tmp;
4330   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4331   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4332 
4333   Label slow_case;
4334   Label done;
4335   Label initialize_header;
4336   Label initialize_object;  // including clearing the fields
4337 
4338   const bool allow_shared_alloc =
4339     Universe::heap()->supports_inline_contig_alloc();
4340 
4341   // Literals
4342   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4343 
4344   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4345   __ get_cpool_and_tags(Rcpool, Rtags);
4346 
4347   // Make sure the class we're about to instantiate has been resolved.
4348   // This is done before loading the InstanceKlass to be consistent with the order
4349   // in which the constant pool is updated (see ConstantPool::klass_at_put)
4350   const int tags_offset = Array<u1>::base_offset_in_bytes();
4351   __ add(Rtemp, Rtags, Rindex);
4352 
4353 #ifdef AARCH64
4354   __ add(Rtemp, Rtemp, tags_offset);
4355   __ ldarb(Rtemp, Rtemp);
4356 #else
4357   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4358 
4359   // use Rklass as a scratch
4360   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4361 #endif // AARCH64
4362 
4363   // get InstanceKlass
4364   __ cmp(Rtemp, JVM_CONSTANT_Class);
4365   __ b(slow_case, ne);
4366   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4367 
4368   // make sure klass is initialized & doesn't have finalizer
4369   // make sure klass is fully initialized
4370   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4371   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4372   __ b(slow_case, ne);
4373 
4374   // get instance_size in InstanceKlass (scaled to a count of bytes)
4375   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4376 
4377   // test to see if it has a finalizer or is malformed in some way
4378   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4379   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4380 
4381   // Allocate the instance:
4382   //  If TLAB is enabled:
4383   //    Try to allocate in the TLAB.
4384   //    If fails, go to the slow path.
4385   //  Else If inline contiguous allocations are enabled:
4386   //    Try to allocate in eden.
4387   //    If fails due to heap end, go to slow path.
4388   //
4389   //  If TLAB is enabled OR inline contiguous is enabled:
4390   //    Initialize the allocation.
4391   //    Exit.
4392   //
4393   //  Go to slow path.
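       // Roughly, the TLAB fast path below corresponds to (illustrative pseudocode):
       //   obj = thread->tlab_top;
       //   if (obj + size > thread->tlab_end) goto slow_case;
       //   thread->tlab_top = obj + size;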
4394   if (UseTLAB) {
4395     const Register Rtlab_top = R1_tmp;
4396     const Register Rtlab_end = R2_tmp;
4397     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4398 
4399     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4400     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4401     __ add(Rtlab_top, Robj, Rsize);
4402     __ cmp(Rtlab_top, Rtlab_end);
4403     __ b(slow_case, hi);
4404     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4405     if (ZeroTLAB) {
4406       // the fields have been already cleared
4407       __ b(initialize_header);
4408     } else {
4409       // initialize both the header and fields
4410       __ b(initialize_object);
4411     }
4412   } else {
4413     // Allocation in the shared Eden, if allowed.
4414     if (allow_shared_alloc) {
4415       const Register Rheap_top_addr = R2_tmp;
4416       const Register Rheap_top = R5_tmp;
4417       const Register Rheap_end = Rtemp;
4418       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4419 
4420       // Rheap_end is (re)loaded in the loop since it is also used as a scratch register in the CAS
4421       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4422 
4423       Label retry;
4424       __ bind(retry);
4425 
4426 #ifdef AARCH64
4427       __ ldxr(Robj, Rheap_top_addr);
4428 #else
4429       __ ldr(Robj, Address(Rheap_top_addr));
4430 #endif // AARCH64
4431 
4432       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4433       __ add(Rheap_top, Robj, Rsize);
4434       __ cmp(Rheap_top, Rheap_end);
4435       __ b(slow_case, hi);
4436 
4437       // Update heap top atomically.
4438       // If someone beats us on the allocation, try again, otherwise continue.
4439 #ifdef AARCH64
4440       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4441       __ cbnz_w(Rtemp2, retry);
4442 #else
4443       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4444       __ b(retry, ne);
4445 #endif // AARCH64
4446 
4447       __ incr_allocated_bytes(Rsize, Rtemp);
4448     }
4449   }
4450 
4451   if (UseTLAB || allow_shared_alloc) {
4452     const Register Rzero0 = R1_tmp;
4453     const Register Rzero1 = R2_tmp;
4454     const Register Rzero_end = R5_tmp;
4455     const Register Rzero_cur = Rtemp;
4456     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4457 
4458     // The object is initialized before the header.  If the object size is
4459     // zero, go directly to the header initialization.
4460     __ bind(initialize_object);
4461     __ subs(Rsize, Rsize, sizeof(oopDesc));
4462     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4463     __ b(initialize_header, eq);
4464 
4465 #ifdef ASSERT
4466     // make sure Rsize is a multiple of 8
4467     Label L;
4468     __ tst(Rsize, 0x07);
4469     __ b(L, eq);
4470     __ stop("object size is not multiple of 8 - adjust this code");
4471     __ bind(L);
4472 #endif
4473 
4474 #ifdef AARCH64
4475     {
4476       Label loop;
4477       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4478       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4479       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4480       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4481 
4482       // Zero by 2 words per iteration.
4483       __ bind(loop);
4484       __ subs(Rsize, Rsize, 2*wordSize);
4485       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4486       __ b(loop, gt);
4487     }
4488 #else
4489     __ mov(Rzero0, 0);
4490     __ mov(Rzero1, 0);
4491     __ add(Rzero_end, Rzero_cur, Rsize);
4492 
4493     // initialize remaining object fields: Rsize was a multiple of 8
4494     { Label loop;
4495       // loop is unrolled 2 times
4496       __ bind(loop);
4497       // #1
4498       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4499       __ cmp(Rzero_cur, Rzero_end);
4500       // #2
4501       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4502       __ cmp(Rzero_cur, Rzero_end, ne);
4503       __ b(loop, ne);
4504     }
4505 #endif // AARCH64
4506 
4507     // initialize object header only.
4508     __ bind(initialize_header);
4509     if (UseBiasedLocking) {
4510       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4511     } else {
4512       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4513     }
4514     // mark
4515     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4516 
4517     // klass
4518 #ifdef AARCH64
4519     __ store_klass_gap(Robj);
4520 #endif // AARCH64
4521     __ store_klass(Rklass, Robj); // blows Rklass:
4522     Rklass = noreg;
4523 
4524     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4525     if (DTraceAllocProbes) {
4526       // Trigger dtrace event for fastpath
4527       Label Lcontinue;
4528 
4529       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4530       __ cbz(Rtemp, Lcontinue);
4531 
4532       __ push(atos);
4533       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4534       __ pop(atos);
4535 
4536       __ bind(Lcontinue);
4537     }
4538 
4539     __ b(done);
4540   } else {
4541     // jump over literals
4542     __ b(slow_case);
4543   }
4544 
4545   if (allow_shared_alloc) {
4546     __ bind_literal(Lheap_top_addr);
4547   }
4548 
4549   // slow case
4550   __ bind(slow_case);
4551   __ get_constant_pool(Rcpool);
4552   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4553   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4554 
4555   // continue
4556   __ bind(done);
4557 
4558   // StoreStore barrier required after complete initialization
4559   // (headers + content zeroing), before the object may escape.
4560   __ membar(MacroAssembler::StoreStore, R1_tmp);
4561 }
4562 
4563 
4564 void TemplateTable::newarray() {
4565   transition(itos, atos);
4566   __ ldrb(R1, at_bcp(1));
4567   __ mov(R2, R0_tos);
4568   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4569   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4570 }
4571 
4572 
4573 void TemplateTable::anewarray() {
4574   transition(itos, atos);
4575   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4576   __ get_constant_pool(R1);
4577   __ mov(R3, R0_tos);
4578   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4579   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4580 }
4581 
4582 
4583 void TemplateTable::arraylength() {
4584   transition(atos, itos);
4585   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4586   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4587 }
4588 
4589 
4590 void TemplateTable::checkcast() {
4591   transition(atos, atos);
4592   Label done, is_null, quicked, resolved, throw_exception;
4593 
4594   const Register Robj = R0_tos;
4595   const Register Rcpool = R2_tmp;
4596   const Register Rtags = R3_tmp;
4597   const Register Rindex = R4_tmp;
4598   const Register Rsuper = R3_tmp;
4599   const Register Rsub   = R4_tmp;
4600   const Register Rsubtype_check_tmp1 = R1_tmp;
4601   const Register Rsubtype_check_tmp2 = LR_tmp;
4602 
4603   __ cbz(Robj, is_null);
4604 
4605   // Get cpool & tags index
4606   __ get_cpool_and_tags(Rcpool, Rtags);
4607   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4608 
4609   // See if bytecode has already been quicked
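       // Roughly: once the class entry has been resolved, its constant pool tag is
       // JVM_CONSTANT_Class and the resolved Klass* can be loaded directly (the quicked
       // path); otherwise InterpreterRuntime::quicken_io_cc is called to resolve it.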
4610   __ add(Rtemp, Rtags, Rindex);
4611 #ifdef AARCH64
4612   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4613   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4614   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4615 #else
4616   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4617 #endif // AARCH64
4618 
4619   __ cmp(Rtemp, JVM_CONSTANT_Class);
4620 
4621 #ifndef AARCH64
4622   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4623 #endif // !AARCH64
4624 
4625   __ b(quicked, eq);
4626 
4627   __ push(atos);
4628   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4629   // vm_result_2 has metadata result
4630   __ get_vm_result_2(Rsuper, Robj);
4631   __ pop_ptr(Robj);
4632   __ b(resolved);
4633 
4634   __ bind(throw_exception);
4635   // Come here on failure of subtype check
4636   __ profile_typecheck_failed(R1_tmp);
4637   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4638   __ b(Interpreter::_throw_ClassCastException_entry);
4639 
4640   // Get superklass in Rsuper and subklass in Rsub
4641   __ bind(quicked);
4642   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4643 
4644   __ bind(resolved);
4645   __ load_klass(Rsub, Robj);
4646 
4647   // Generate subtype check. Blows both tmps and Rtemp.
4648   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4649   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4650 
4651   // Come here on success
4652 
4653   // Collect counts on whether this check-cast sees NULLs a lot or not.
4654   if (ProfileInterpreter) {
4655     __ b(done);
4656     __ bind(is_null);
4657     __ profile_null_seen(R1_tmp);
4658   } else {
4659     __ bind(is_null);   // same as 'done'
4660   }
4661   __ bind(done);
4662 }
4663 
4664 
4665 void TemplateTable::instanceof() {
4666   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4667   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4668 
4669   transition(atos, itos);
4670   Label done, is_null, not_subtype, quicked, resolved;
4671 
4672   const Register Robj = R0_tos;
4673   const Register Rcpool = R2_tmp;
4674   const Register Rtags = R3_tmp;
4675   const Register Rindex = R4_tmp;
4676   const Register Rsuper = R3_tmp;
4677   const Register Rsub   = R4_tmp;
4678   const Register Rsubtype_check_tmp1 = R0_tmp;
4679   const Register Rsubtype_check_tmp2 = R1_tmp;
4680 
4681   __ cbz(Robj, is_null);
4682 
4683   __ load_klass(Rsub, Robj);
4684 
4685   // Get cpool & tags index
4686   __ get_cpool_and_tags(Rcpool, Rtags);
4687   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4688 
4689   // See if bytecode has already been quicked
4690   __ add(Rtemp, Rtags, Rindex);
4691 #ifdef AARCH64
4692   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4693   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4694   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4695 #else
4696   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4697 #endif // AARCH64
4698   __ cmp(Rtemp, JVM_CONSTANT_Class);
4699 
4700 #ifndef AARCH64
4701   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4702 #endif // !AARCH64
4703 
4704   __ b(quicked, eq);
4705 
4706   __ push(atos);
4707   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4708   // vm_result_2 has metadata result
4709   __ get_vm_result_2(Rsuper, Robj);
4710   __ pop_ptr(Robj);
4711   __ b(resolved);
4712 
4713   // Get superklass in Rsuper and subklass in Rsub
4714   __ bind(quicked);
4715   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4716 
4717   __ bind(resolved);
4718   __ load_klass(Rsub, Robj);
4719 
4720   // Generate subtype check. Blows both tmps and Rtemp.
4721   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4722 
4723   // Come here on success
4724   __ mov(R0_tos, 1);
4725   __ b(done);
4726 
4727   __ bind(not_subtype);
4728   // Come here on failure
4729   __ profile_typecheck_failed(R1_tmp);
4730   __ mov(R0_tos, 0);
4731 
4732   // Collect counts on whether this test sees NULLs a lot or not.
4733   if (ProfileInterpreter) {
4734     __ b(done);
4735     __ bind(is_null);
4736     __ profile_null_seen(R1_tmp);
4737   } else {
4738     __ bind(is_null);   // same as 'done'
4739   }
4740   __ bind(done);
4741 }
4742 
4743 
4744 //----------------------------------------------------------------------------------------------------
4745 // Breakpoints
4746 void TemplateTable::_breakpoint() {
4747 
4748   // Note: We get here even if we are single stepping:
4749   // jbug insists on setting breakpoints at every bytecode
4750   // even if we are in single step mode.
4751 
4752   transition(vtos, vtos);
4753 
4754   // get the unpatched byte code
4755   __ mov(R1, Rmethod);
4756   __ mov(R2, Rbcp);
4757   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4758 #ifdef AARCH64
4759   __ sxtw(Rtmp_save0, R0);
4760 #else
4761   __ mov(Rtmp_save0, R0);
4762 #endif // AARCH64
4763 
4764   // post the breakpoint event
4765   __ mov(R1, Rmethod);
4766   __ mov(R2, Rbcp);
4767   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4768 
4769   // complete the execution of original bytecode
4770   __ mov(R3_bytecode, Rtmp_save0);
4771   __ dispatch_only_normal(vtos);
4772 }
4773 
4774 
4775 //----------------------------------------------------------------------------------------------------
4776 // Exceptions
4777 
4778 void TemplateTable::athrow() {
4779   transition(atos, vtos);
4780   __ mov(Rexception_obj, R0_tos);
4781   __ null_check(Rexception_obj, Rtemp);
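       // The shared throw_exception_entry expects the pending exception oop in Rexception_obj.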
4782   __ b(Interpreter::throw_exception_entry());
4783 }
4784 
4785 
4786 //----------------------------------------------------------------------------------------------------
4787 // Synchronization
4788 //
4789 // Note: monitorenter & exit are symmetric routines; which is reflected
4790 //       in the assembly code structure as well
4791 //
4792 // Stack layout:
4793 //
4794 // [expressions  ] <--- Rstack_top        = expression stack top
4795 // ..
4796 // [expressions  ]
4797 // [monitor entry] <--- monitor block top = expression stack bot
4798 // ..
4799 // [monitor entry]
4800 // [frame data   ] <--- monitor block bot
4801 // ...
4802 // [saved FP     ] <--- FP
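     // monitorenter grows the monitor block by one entry by sliding the expression stack
     // down (towards lower addresses) by entry_size; emptied entries are reused via the
     // free-slot search in monitorenter.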
4803 
4804 
4805 void TemplateTable::monitorenter() {
4806   transition(atos, vtos);
4807 
4808   const Register Robj = R0_tos;
4809   const Register Rentry = R1_tmp;
4810 
4811   // check for NULL object
4812   __ null_check(Robj, Rtemp);
4813 
4814   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4815   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4816   Label allocate_monitor, allocated;
4817 
4818   // initialize entry pointer
4819   __ mov(Rentry, 0);                             // points to free slot or NULL
4820 
4821   // find a free slot in the monitor block (result in Rentry)
4822   { Label loop, exit;
4823     const Register Rcur = R2_tmp;
4824     const Register Rcur_obj = Rtemp;
4825     const Register Rbottom = R3_tmp;
4826     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4827 
4828     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4829                                  // points to current entry, starting with top-most entry
4830     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4831                                  // points to word before bottom of monitor block
4832 
4833     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4834 #ifndef AARCH64
4835     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4836                                                  // prefetch monitor's object for the first iteration
4837 #endif // !AARCH64
4838     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4839 
4840     __ bind(loop);
4841 #ifdef AARCH64
4842     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4843 #endif // AARCH64
4844     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4845     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4846 
4847     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4848     __ b(exit, eq);                              // if same object then stop searching
4849 
4850     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4851 
4852     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4853 #ifndef AARCH64
4854     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4855                                                  // prefetch monitor's object for the next iteration
4856 #endif // !AARCH64
4857     __ b(loop, ne);                              // if not at bottom then check this entry
4858     __ bind(exit);
4859   }
4860 
4861   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4862 
4863   __ bind(allocate_monitor);
4864 
4865   // allocate one if there's no free slot
4866   { Label loop;
4867     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4868 
4869     // 1. compute new pointers
4870 
4871 #ifdef AARCH64
4872     __ check_extended_sp(Rtemp);
4873     __ sub(SP, SP, entry_size);                  // adjust extended SP
4874     __ mov(Rtemp, SP);
4875     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4876 #endif // AARCH64
4877 
4878     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4879                                                  // old monitor block top / expression stack bottom
4880 
4881     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4882     __ check_stack_top_on_expansion();
4883 
4884     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4885 
4886     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4887 
4888     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4889                                                  // set new monitor block top
4890 
4891     // 2. move expression stack contents
4892 
4893     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4894 #ifndef AARCH64
4895     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4896 #endif // !AARCH64
4897     __ b(allocated, eq);
4898 
4899     __ bind(loop);
4900 #ifdef AARCH64
4901     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4902 #endif // AARCH64
4903     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4904                                                             // and advance to next word
4905     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4906 #ifndef AARCH64
4907     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4908 #endif // !AARCH64
4909     __ b(loop, ne);                                         // if not at bottom then copy next word
4910   }
4911 
4912   // call run-time routine
4913 
4914   // Rentry: points to monitor entry
4915   __ bind(allocated);
4916 
4917   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4918   // The object has already been popped from the stack, so the expression stack looks correct.
4919   __ add(Rbcp, Rbcp, 1);
4920 
4921   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4922   __ lock_object(Rentry);
4923 
4924   // check to make sure this monitor doesn't cause stack overflow after locking
4925   __ save_bcp();  // in case of exception
4926   __ arm_stack_overflow_check(0, Rtemp);
4927 
4928   // The bcp has already been incremented. Just need to dispatch to next instruction.
4929   __ dispatch_next(vtos);
4930 }
4931 
4932 
4933 void TemplateTable::monitorexit() {
4934   transition(atos, vtos);
4935 
4936   const Register Robj = R0_tos;
4937   const Register Rcur = R1_tmp;
4938   const Register Rbottom = R2_tmp;
4939   const Register Rcur_obj = Rtemp;
4940 
4941   // check for NULL object
4942   __ null_check(Robj, Rtemp);
4943 
4944   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4945   Label found, throw_exception;
4946 
4947   // find matching slot
4948   { Label loop;
4949     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4950 
4951     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4952                                  // points to current entry, starting with top-most entry
4953     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4954                                  // points to word before bottom of monitor block
4955 
4956     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4957 #ifndef AARCH64
4958     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4959                                                  // prefetch monitor's object for the first iteration
4960 #endif // !AARCH64
4961     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4962 
4963     __ bind(loop);
4964 #ifdef AARCH64
4965     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4966 #endif // AARCH64
4967     // check if current entry is for same object
4968     __ cmp(Rcur_obj, Robj);
4969     __ b(found, eq);                             // if same object then stop searching
4970     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4971     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4972 #ifndef AARCH64
4973     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4974 #endif // !AARCH64
4975     __ b(loop, ne);                              // if not at bottom then check this entry
4976   }
4977 
4978   // Error handling: unlocking was not block-structured
4979   __ bind(throw_exception);
4980   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4981   __ should_not_reach_here();
4982 
4983   // call run-time routine
4984   // Rcur: points to monitor entry
4985   __ bind(found);
4986   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4987   __ unlock_object(Rcur);
4988   __ pop_ptr(Robj);                              // discard object
4989 }
4990 
4991 
4992 //----------------------------------------------------------------------------------------------------
4993 // Wide instructions
4994 
4995 void TemplateTable::wide() {
4996   transition(vtos, vtos);
4997   __ ldrb(R3_bytecode, at_bcp(1));
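       // R3_bytecode now holds the bytecode following the wide prefix; use it to index the
       // table of wide entry points and jump to the wide variant of that bytecode.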
4998 
4999   InlinedAddress Ltable((address)Interpreter::_wentry_point);
5000   __ ldr_literal(Rtemp, Ltable);
5001   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
5002 
5003   __ nop(); // to avoid filling CPU pipeline with invalid instructions
5004   __ nop();
5005   __ bind_literal(Ltable);
5006 }
5007 
5008 
5009 //----------------------------------------------------------------------------------------------------
5010 // Multi arrays
5011 
5012 void TemplateTable::multianewarray() {
5013   transition(vtos, atos);
5014   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
5015 
5016   // The last dimension is on top of the stack; we want the address of the first one:
5017   //   first_addr = last_addr + ndims * stackElementSize - 1*wordSize
5018   // where the trailing wordSize makes R1 point at the first element of the dimensions array, not one past it.
5019   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5020   __ sub(R1, Rtemp, wordSize);
5021 
5022   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
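       // call_VM leaves the new array oop in R0 (== R0_tos), satisfying the atos out-state.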
5023   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));  // pop the dimension words
5024   // MacroAssembler::StoreStore useless (included in the runtime exit path)
5025 }