1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  49   // No arm specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 #ifndef AARCH64
  62 static inline Address haddress(int n)            { return iaddress(n + 0); }
  63 #endif // !AARCH64
  64 
  65 static inline Address faddress(int n)            { return iaddress(n); }
  66 static inline Address daddress(int n)            { return laddress(n); }
  67 static inline Address aaddress(int n)            { return iaddress(n); }
  68 
  69 
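     // Locals are addressed downward from Rlocals, so the base address of the
     // local slot at 'index' is Rlocals - (index << logStackElementSize).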
  70 void TemplateTable::get_local_base_addr(Register r, Register index) {
  71   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  72 }
  73 
  74 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  75 #ifdef AARCH64
  76   get_local_base_addr(scratch, index);
  77   return Address(scratch);
  78 #else
  79   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  80 #endif // AARCH64
  81 }
  82 
  83 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  84   return load_iaddress(index, scratch);
  85 }
  86 
  87 Address TemplateTable::load_faddress(Register index, Register scratch) {
  88 #ifdef __SOFTFP__
  89   return load_iaddress(index, scratch);
  90 #else
  91   get_local_base_addr(scratch, index);
  92   return Address(scratch);
  93 #endif // __SOFTFP__
  94 }
  95 
  96 Address TemplateTable::load_daddress(Register index, Register scratch) {
  97   get_local_base_addr(scratch, index);
  98   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  99 }
 100 
 101 // Address at the top of the Java expression stack, which may be different
 102 // from SP (it is not different for category 1 values).
 103 static inline Address at_tos() {
 104   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 105 }
 106 
 107 static inline Address at_tos_p1() {
 108   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 109 }
 110 
 111 static inline Address at_tos_p2() {
 112   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 113 }
 114 
 115 
 116 // 32-bit ARM:
 117 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 118 // separate ldr instructions (supports nonadjacent values).
 119 // Used for longs in all modes, and for doubles in SOFTFP mode.
 120 //
 121 // AArch64: loads long local into R0_tos.
 122 //
 123 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 124   const Register Rlocal_base = tmp;
 125   assert_different_registers(Rlocal_index, tmp);
 126 
 127   get_local_base_addr(Rlocal_base, Rlocal_index);
 128 #ifdef AARCH64
 129   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130 #else
 131   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 132   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 133 #endif // AARCH64
 134 }
 135 
 136 
 137 // 32-bit ARM:
 138 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 139 // separate str instructions (supports nonadjacent values).
 140 // Used for longs in all modes, and for doubles in SOFTFP mode
 141 //
 142 // AArch64: stores R0_tos to long local.
 143 //
 144 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 145   const Register Rlocal_base = tmp;
 146   assert_different_registers(Rlocal_index, tmp);
 147 
 148   get_local_base_addr(Rlocal_base, Rlocal_index);
 149 #ifdef AARCH64
 150   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151 #else
 152   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 153   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 154 #endif // AARCH64
 155 }
 156 
 157 // Returns address of Java array element using temp register as address base.
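     // temp receives array + (index << log2(element size)); the returned Address
     // then adds the array header (base_offset_in_bytes) as a displacement.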
 158 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 159   int logElemSize = exact_log2(type2aelembytes(elemType));
 160   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 161   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 162 }
 163 
 164 //----------------------------------------------------------------------------------------------------
 165 // Condition conversion
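     // Returns the ARM condition code for the negation of the given bytecode
     // condition.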
 166 AsmCondition convNegCond(TemplateTable::Condition cc) {
 167   switch (cc) {
 168     case TemplateTable::equal        : return ne;
 169     case TemplateTable::not_equal    : return eq;
 170     case TemplateTable::less         : return ge;
 171     case TemplateTable::less_equal   : return gt;
 172     case TemplateTable::greater      : return le;
 173     case TemplateTable::greater_equal: return lt;
 174   }
 175   ShouldNotReachHere();
 176   return nv;
 177 }
 178 
 179 //----------------------------------------------------------------------------------------------------
 180 // Miscellaneous helper routines
 181 
 182 // Store an oop (or NULL) at the address described by obj.
 183 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 184 // Also destroys new_val and obj.base().
 185 static void do_oop_store(InterpreterMacroAssembler* _masm,
 186                          Address obj,
 187                          Register new_val,
 188                          Register tmp1,
 189                          Register tmp2,
 190                          Register tmp3,
 191                          bool is_null,
 192                          DecoratorSet decorators = 0) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   if (is_null) {
 196     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 197   } else {
 198     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 199   }
 200 }
 201 
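     // Load an oop from the address described by obj into dst, applying the
     // given access decorators.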
 202 static void do_oop_load(InterpreterMacroAssembler* _masm,
 203                         Register dst,
 204                         Address obj,
 205                         DecoratorSet decorators = 0) {
 206   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 207 }
 208 
 209 Address TemplateTable::at_bcp(int offset) {
 210   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 211   return Address(Rbcp, offset);
 212 }
 213 
 214 
 215 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 216 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 217                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 218                                    int byte_no) {
 219   assert_different_registers(bc_reg, temp_reg);
 220   if (!RewriteBytecodes)  return;
 221   Label L_patch_done;
 222 
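       // Select the quickened bytecode to write back.  For the putfield family the
       // rewrite additionally depends on the constant pool cache entry having been
       // resolved (see the comment in the case below).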
 223   switch (bc) {
 224   case Bytecodes::_fast_aputfield:
 225   case Bytecodes::_fast_bputfield:
 226   case Bytecodes::_fast_zputfield:
 227   case Bytecodes::_fast_cputfield:
 228   case Bytecodes::_fast_dputfield:
 229   case Bytecodes::_fast_fputfield:
 230   case Bytecodes::_fast_iputfield:
 231   case Bytecodes::_fast_lputfield:
 232   case Bytecodes::_fast_sputfield:
 233     {
 234       // We skip bytecode quickening for putfield instructions when
 235       // the put_code written to the constant pool cache is zero.
 236       // This is required so that every execution of this instruction
 237       // calls out to InterpreterRuntime::resolve_get_put to do
 238       // additional, required work.
 239       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 240       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 241       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 242       __ mov(bc_reg, bc);
 243       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 244     }
 245     break;
 246   default:
 247     assert(byte_no == -1, "sanity");
 248     // the pair bytecodes have already done the load.
 249     if (load_bc_into_bc_reg) {
 250       __ mov(bc_reg, bc);
 251     }
 252   }
 253 
 254   if (__ can_post_breakpoint()) {
 255     Label L_fast_patch;
 256     // if a breakpoint is present we can't rewrite the stream directly
 257     __ ldrb(temp_reg, at_bcp(0));
 258     __ cmp(temp_reg, Bytecodes::_breakpoint);
 259     __ b(L_fast_patch, ne);
 260     if (bc_reg != R3) {
 261       __ mov(R3, bc_reg);
 262     }
 263     __ mov(R1, Rmethod);
 264     __ mov(R2, Rbcp);
 265     // Let breakpoint table handling rewrite to quicker bytecode
 266     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 267     __ b(L_patch_done);
 268     __ bind(L_fast_patch);
 269   }
 270 
 271 #ifdef ASSERT
 272   Label L_okay;
 273   __ ldrb(temp_reg, at_bcp(0));
 274   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 275   __ b(L_okay, eq);
 276   __ cmp(temp_reg, bc_reg);
 277   __ b(L_okay, eq);
 278   __ stop("patching the wrong bytecode");
 279   __ bind(L_okay);
 280 #endif
 281 
 282   // patch bytecode
 283   __ strb(bc_reg, at_bcp(0));
 284   __ bind(L_patch_done);
 285 }
 286 
 287 //----------------------------------------------------------------------------------------------------
 288 // Individual instructions
 289 
 290 void TemplateTable::nop() {
 291   transition(vtos, vtos);
 292   // nothing to do
 293 }
 294 
 295 void TemplateTable::shouldnotreachhere() {
 296   transition(vtos, vtos);
 297   __ stop("shouldnotreachhere bytecode");
 298 }
 299 
 300 
 301 
 302 void TemplateTable::aconst_null() {
 303   transition(vtos, atos);
 304   __ mov(R0_tos, 0);
 305 }
 306 
 307 
 308 void TemplateTable::iconst(int value) {
 309   transition(vtos, itos);
 310   __ mov_slow(R0_tos, value);
 311 }
 312 
 313 
 314 void TemplateTable::lconst(int value) {
 315   transition(vtos, ltos);
 316   assert((value == 0) || (value == 1), "unexpected long constant");
 317   __ mov(R0_tos, value);
 318 #ifndef AARCH64
 319   __ mov(R1_tos_hi, 0);
 320 #endif // !AARCH64
 321 }
 322 
 323 
 324 void TemplateTable::fconst(int value) {
 325   transition(vtos, ftos);
 326 #ifdef AARCH64
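       // 1.0f and 2.0f are materialized with FMOV (immediate); 0x70 and 0x00 are
       // the 8-bit immediate encodings used here for those values.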
 327   switch(value) {
 328   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 329   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 330   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 331   default:  ShouldNotReachHere();      break;
 332   }
 333 #else
 334   const int zero = 0;         // 0.0f
 335   const int one = 0x3f800000; // 1.0f
 336   const int two = 0x40000000; // 2.0f
 337 
 338   switch(value) {
 339   case 0:   __ mov(R0_tos, zero);   break;
 340   case 1:   __ mov(R0_tos, one);    break;
 341   case 2:   __ mov(R0_tos, two);    break;
 342   default:  ShouldNotReachHere();   break;
 343   }
 344 
 345 #ifndef __SOFTFP__
 346   __ fmsr(S0_tos, R0_tos);
 347 #endif // !__SOFTFP__
 348 #endif // AARCH64
 349 }
 350 
 351 
 352 void TemplateTable::dconst(int value) {
 353   transition(vtos, dtos);
 354 #ifdef AARCH64
 355   switch(value) {
 356   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 357   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 358   default:  ShouldNotReachHere();      break;
 359   }
 360 #else
 361   const int one_lo = 0;            // low part of 1.0
 362   const int one_hi = 0x3ff00000;   // high part of 1.0
 363 
 364   if (value == 0) {
 365 #ifdef __SOFTFP__
 366     __ mov(R0_tos_lo, 0);
 367     __ mov(R1_tos_hi, 0);
 368 #else
 369     __ mov(R0_tmp, 0);
 370     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 371 #endif // __SOFTFP__
 372   } else if (value == 1) {
 373     __ mov(R0_tos_lo, one_lo);
 374     __ mov_slow(R1_tos_hi, one_hi);
 375 #ifndef __SOFTFP__
 376     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 377 #endif // !__SOFTFP__
 378   } else {
 379     ShouldNotReachHere();
 380   }
 381 #endif // AARCH64
 382 }
 383 
 384 
 385 void TemplateTable::bipush() {
 386   transition(vtos, itos);
 387   __ ldrsb(R0_tos, at_bcp(1));
 388 }
 389 
 390 
 391 void TemplateTable::sipush() {
 392   transition(vtos, itos);
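       // The operand is a signed 16-bit immediate, assembled from the sign-extended
       // byte at bcp+1 (high) and the unsigned byte at bcp+2 (low).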
 393   __ ldrsb(R0_tmp, at_bcp(1));
 394   __ ldrb(R1_tmp, at_bcp(2));
 395   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 396 }
 397 
 398 
 399 void TemplateTable::ldc(bool wide) {
 400   transition(vtos, vtos);
 401   Label fastCase, Done;
 402 
 403   const Register Rindex = R1_tmp;
 404   const Register Rcpool = R2_tmp;
 405   const Register Rtags  = R3_tmp;
 406   const Register RtagType = R3_tmp;
 407 
 408   if (wide) {
 409     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 410   } else {
 411     __ ldrb(Rindex, at_bcp(1));
 412   }
 413   __ get_cpool_and_tags(Rcpool, Rtags);
 414 
 415   const int base_offset = ConstantPool::header_size() * wordSize;
 416   const int tags_offset = Array<u1>::base_offset_in_bytes();
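       // base_offset is the offset of the first constant pool entry; tags_offset is
       // the offset of the first element of the u1 tag array.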
 417 
 418   // get const type
 419   __ add(Rtemp, Rtags, tags_offset);
 420 #ifdef AARCH64
 421   __ add(Rtemp, Rtemp, Rindex);
 422   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether the control dependency is enough
 423 #else
 424   __ ldrb(RtagType, Address(Rtemp, Rindex));
 425   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 426 #endif // AARCH64
 427 
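       // The chained compares below test whether the tag is UnresolvedClass,
       // UnresolvedClassInError or Class; any of these falls through to the slow
       // (call_VM) path, everything else branches to fastCase.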
 428   // unresolved class - get the resolved class
 429   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 430 
 431   // unresolved class in error (resolution failed) - call into runtime
 432   // so that the same error from first resolution attempt is thrown.
 433 #ifdef AARCH64
 434   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 435   __ cond_cmp(RtagType, Rtemp, ne);
 436 #else
 437   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 438 #endif // AARCH64
 439 
 440   // resolved class - need to call vm to get java mirror of the class
 441   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 442 
 443   __ b(fastCase, ne);
 444 
 445   // slow case - call runtime
 446   __ mov(R1, wide);
 447   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 448   __ push(atos);
 449   __ b(Done);
 450 
 451   // int, float
 452   __ bind(fastCase);
 453 #ifdef ASSERT
 454   { Label L;
 455     __ cmp(RtagType, JVM_CONSTANT_Integer);
 456     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 457     __ b(L, eq);
 458     __ stop("unexpected tag type in ldc");
 459     __ bind(L);
 460   }
 461 #endif // ASSERT
 462   // itos, ftos
 463   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 464   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 465 
 466   // floats and ints are placed on the stack in the same way, so
 467   // we can use push(itos) to transfer the float value without using VFP
 468   __ push(itos);
 469   __ bind(Done);
 470 }
 471 
 472 // Fast path for caching oop constants.
 473 void TemplateTable::fast_aldc(bool wide) {
 474   transition(vtos, atos);
 475   int index_size = wide ? sizeof(u2) : sizeof(u1);
 476   Label resolved;
 477 
 478   // We are resolved if the resolved reference cache entry contains a
 479   // non-null object (CallSite, etc.)
 480   assert_different_registers(R0_tos, R2_tmp);
 481   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 482   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 483   __ cbnz(R0_tos, resolved);
 484 
 485   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 486 
 487   // first time invocation - must resolve first
 488   __ mov(R1, (int)bytecode());
 489   __ call_VM(R0_tos, entry, R1);
 490   __ bind(resolved);
 491 
 492   if (VerifyOops) {
 493     __ verify_oop(R0_tos);
 494   }
 495 }
 496 
 497 void TemplateTable::ldc2_w() {
 498   transition(vtos, vtos);
 499   const Register Rtags  = R2_tmp;
 500   const Register Rindex = R3_tmp;
 501   const Register Rcpool = R4_tmp;
 502   const Register Rbase  = R5_tmp;
 503 
 504   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 505 
 506   __ get_cpool_and_tags(Rcpool, Rtags);
 507   const int base_offset = ConstantPool::header_size() * wordSize;
 508   const int tags_offset = Array<u1>::base_offset_in_bytes();
 509 
 510   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
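       // Rbase = Rcpool + index * wordSize; adding base_offset (the pool header
       // size) below yields the address of the 64-bit constant.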
 511 
 512 #ifdef __ABI_HARD__
 513   Label Long, exit;
 514   // get type from tags
 515   __ add(Rtemp, Rtags, tags_offset);
 516   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 517   __ cmp(Rtemp, JVM_CONSTANT_Double);
 518   __ b(Long, ne);
 519   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 520 
 521   __ push(dtos);
 522   __ b(exit);
 523   __ bind(Long);
 524 #endif
 525 
 526 #ifdef AARCH64
 527   __ ldr(R0_tos, Address(Rbase, base_offset));
 528 #else
 529   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 530   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 531 #endif // AARCH64
 532   __ push(ltos);
 533 
 534 #ifdef __ABI_HARD__
 535   __ bind(exit);
 536 #endif
 537 }
 538 
 539 
 540 void TemplateTable::locals_index(Register reg, int offset) {
 541   __ ldrb(reg, at_bcp(offset));
 542 }
 543 
 544 void TemplateTable::iload() {
 545   iload_internal();
 546 }
 547 
 548 void TemplateTable::nofast_iload() {
 549   iload_internal(may_not_rewrite);
 550 }
 551 
 552 void TemplateTable::iload_internal(RewriteControl rc) {
 553   transition(vtos, itos);
 554 
 555   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 556     Label rewrite, done;
 557     const Register next_bytecode = R1_tmp;
 558     const Register target_bytecode = R2_tmp;
 559 
 560     // get next byte
 561     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 562     // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite the
 563     // last two iloads in a pair.  Comparing against _fast_iload means that
 564     // the next bytecode is neither an iload nor a caload, and therefore
 565     // an iload pair.
 566     __ cmp(next_bytecode, Bytecodes::_iload);
 567     __ b(done, eq);
 568 
 569     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 570     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 571     __ b(rewrite, eq);
 572 
 573     // if _caload, rewrite to fast_icaload
 574     __ cmp(next_bytecode, Bytecodes::_caload);
 575     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 576     __ b(rewrite, eq);
 577 
 578     // rewrite so iload doesn't check again.
 579     __ mov(target_bytecode, Bytecodes::_fast_iload);
 580 
 581     // rewrite
 582     // R2: fast bytecode
 583     __ bind(rewrite);
 584     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 585     __ bind(done);
 586   }
 587 
 588   // Get the local value into tos
 589   const Register Rlocal_index = R1_tmp;
 590   locals_index(Rlocal_index);
 591   Address local = load_iaddress(Rlocal_index, Rtemp);
 592   __ ldr_s32(R0_tos, local);
 593 }
 594 
 595 
 596 void TemplateTable::fast_iload2() {
 597   transition(vtos, itos);
 598   const Register Rlocal_index = R1_tmp;
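       // _fast_iload2 folds a pair of iloads: the local indexed at bcp+1 is pushed,
       // and the local indexed at bcp+3 is left in tos.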
 599 
 600   locals_index(Rlocal_index);
 601   Address local = load_iaddress(Rlocal_index, Rtemp);
 602   __ ldr_s32(R0_tos, local);
 603   __ push(itos);
 604 
 605   locals_index(Rlocal_index, 3);
 606   local = load_iaddress(Rlocal_index, Rtemp);
 607   __ ldr_s32(R0_tos, local);
 608 }
 609 
 610 void TemplateTable::fast_iload() {
 611   transition(vtos, itos);
 612   const Register Rlocal_index = R1_tmp;
 613 
 614   locals_index(Rlocal_index);
 615   Address local = load_iaddress(Rlocal_index, Rtemp);
 616   __ ldr_s32(R0_tos, local);
 617 }
 618 
 619 
 620 void TemplateTable::lload() {
 621   transition(vtos, ltos);
 622   const Register Rlocal_index = R2_tmp;
 623 
 624   locals_index(Rlocal_index);
 625   load_category2_local(Rlocal_index, R3_tmp);
 626 }
 627 
 628 
 629 void TemplateTable::fload() {
 630   transition(vtos, ftos);
 631   const Register Rlocal_index = R2_tmp;
 632 
 633   // Get the local value into tos
 634   locals_index(Rlocal_index);
 635   Address local = load_faddress(Rlocal_index, Rtemp);
 636 #ifdef __SOFTFP__
 637   __ ldr(R0_tos, local);
 638 #else
 639   __ ldr_float(S0_tos, local);
 640 #endif // __SOFTFP__
 641 }
 642 
 643 
 644 void TemplateTable::dload() {
 645   transition(vtos, dtos);
 646   const Register Rlocal_index = R2_tmp;
 647 
 648   locals_index(Rlocal_index);
 649 
 650 #ifdef __SOFTFP__
 651   load_category2_local(Rlocal_index, R3_tmp);
 652 #else
 653   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 654 #endif // __SOFTFP__
 655 }
 656 
 657 
 658 void TemplateTable::aload() {
 659   transition(vtos, atos);
 660   const Register Rlocal_index = R1_tmp;
 661 
 662   locals_index(Rlocal_index);
 663   Address local = load_aaddress(Rlocal_index, Rtemp);
 664   __ ldr(R0_tos, local);
 665 }
 666 
 667 
 668 void TemplateTable::locals_index_wide(Register reg) {
 669   assert_different_registers(reg, Rtemp);
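       // The wide local index is an unsigned 16-bit value:
       // (byte at bcp+2) << 8 | (byte at bcp+3).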
 670   __ ldrb(Rtemp, at_bcp(2));
 671   __ ldrb(reg, at_bcp(3));
 672   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 673 }
 674 
 675 
 676 void TemplateTable::wide_iload() {
 677   transition(vtos, itos);
 678   const Register Rlocal_index = R2_tmp;
 679 
 680   locals_index_wide(Rlocal_index);
 681   Address local = load_iaddress(Rlocal_index, Rtemp);
 682   __ ldr_s32(R0_tos, local);
 683 }
 684 
 685 
 686 void TemplateTable::wide_lload() {
 687   transition(vtos, ltos);
 688   const Register Rlocal_index = R2_tmp;
 689   const Register Rlocal_base = R3_tmp;
 690 
 691   locals_index_wide(Rlocal_index);
 692   load_category2_local(Rlocal_index, R3_tmp);
 693 }
 694 
 695 
 696 void TemplateTable::wide_fload() {
 697   transition(vtos, ftos);
 698   const Register Rlocal_index = R2_tmp;
 699 
 700   locals_index_wide(Rlocal_index);
 701   Address local = load_faddress(Rlocal_index, Rtemp);
 702 #ifdef __SOFTFP__
 703   __ ldr(R0_tos, local);
 704 #else
 705   __ ldr_float(S0_tos, local);
 706 #endif // __SOFTFP__
 707 }
 708 
 709 
 710 void TemplateTable::wide_dload() {
 711   transition(vtos, dtos);
 712   const Register Rlocal_index = R2_tmp;
 713 
 714   locals_index_wide(Rlocal_index);
 715 #ifdef __SOFTFP__
 716   load_category2_local(Rlocal_index, R3_tmp);
 717 #else
 718   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 719 #endif // __SOFTFP__
 720 }
 721 
 722 
 723 void TemplateTable::wide_aload() {
 724   transition(vtos, atos);
 725   const Register Rlocal_index = R2_tmp;
 726 
 727   locals_index_wide(Rlocal_index);
 728   Address local = load_aaddress(Rlocal_index, Rtemp);
 729   __ ldr(R0_tos, local);
 730 }
 731 
 732 void TemplateTable::index_check(Register array, Register index) {
 733   // Pop ptr into array
 734   __ pop_ptr(array);
 735   index_check_without_pop(array, index);
 736 }
 737 
 738 void TemplateTable::index_check_without_pop(Register array, Register index) {
 739   assert_different_registers(array, index, Rtemp);
 740   // check array
 741   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 742   // check index
 743   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 744   __ cmp_32(index, Rtemp);
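       // Unsigned comparison: 'hs' is taken both when index >= length and when the
       // index is negative (it then appears as a large unsigned value).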
 745   if (index != R4_ArrayIndexOutOfBounds_index) {
 746     // convention with generate_ArrayIndexOutOfBounds_handler()
 747     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 748   }
 749   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 750 }
 751 
 752 
 753 void TemplateTable::iaload() {
 754   transition(itos, itos);
 755   const Register Rarray = R1_tmp;
 756   const Register Rindex = R0_tos;
 757 
 758   index_check(Rarray, Rindex);
 759   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 760 }
 761 
 762 
 763 void TemplateTable::laload() {
 764   transition(itos, ltos);
 765   const Register Rarray = R1_tmp;
 766   const Register Rindex = R0_tos;
 767 
 768   index_check(Rarray, Rindex);
 769 
 770 #ifdef AARCH64
 771   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 772 #else
 773   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 774   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 775   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 776 #endif // AARCH64
 777 }
 778 
 779 
 780 void TemplateTable::faload() {
 781   transition(itos, ftos);
 782   const Register Rarray = R1_tmp;
 783   const Register Rindex = R0_tos;
 784 
 785   index_check(Rarray, Rindex);
 786 
 787   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 788 #ifdef __SOFTFP__
 789   __ ldr(R0_tos, addr);
 790 #else
 791   __ ldr_float(S0_tos, addr);
 792 #endif // __SOFTFP__
 793 }
 794 
 795 
 796 void TemplateTable::daload() {
 797   transition(itos, dtos);
 798   const Register Rarray = R1_tmp;
 799   const Register Rindex = R0_tos;
 800 
 801   index_check(Rarray, Rindex);
 802 
 803 #ifdef __SOFTFP__
 804   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 805   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 806   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 807 #else
 808   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 809 #endif // __SOFTFP__
 810 }
 811 
 812 
 813 void TemplateTable::aaload() {
 814   transition(itos, atos);
 815   const Register Rarray = R1_tmp;
 816   const Register Rindex = R0_tos;
 817 
 818   index_check(Rarray, Rindex);
 819   do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
 820 }
 821 
 822 
 823 void TemplateTable::baload() {
 824   transition(itos, itos);
 825   const Register Rarray = R1_tmp;
 826   const Register Rindex = R0_tos;
 827 
 828   index_check(Rarray, Rindex);
 829   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 830 }
 831 
 832 
 833 void TemplateTable::caload() {
 834   transition(itos, itos);
 835   const Register Rarray = R1_tmp;
 836   const Register Rindex = R0_tos;
 837 
 838   index_check(Rarray, Rindex);
 839   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 840 }
 841 
 842 
 843 // iload followed by caload frequent pair
 844 void TemplateTable::fast_icaload() {
 845   transition(vtos, itos);
 846   const Register Rlocal_index = R1_tmp;
 847   const Register Rarray = R1_tmp;
 848   const Register Rindex = R4_tmp; // index_check prefers index in R4
 849   assert_different_registers(Rlocal_index, Rindex);
 850   assert_different_registers(Rarray, Rindex);
 851 
 852   // load index out of locals
 853   locals_index(Rlocal_index);
 854   Address local = load_iaddress(Rlocal_index, Rtemp);
 855   __ ldr_s32(Rindex, local);
 856 
 857   // get array element
 858   index_check(Rarray, Rindex);
 859   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 860 }
 861 
 862 
 863 void TemplateTable::saload() {
 864   transition(itos, itos);
 865   const Register Rarray = R1_tmp;
 866   const Register Rindex = R0_tos;
 867 
 868   index_check(Rarray, Rindex);
 869   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 870 }
 871 
 872 
 873 void TemplateTable::iload(int n) {
 874   transition(vtos, itos);
 875   __ ldr_s32(R0_tos, iaddress(n));
 876 }
 877 
 878 
 879 void TemplateTable::lload(int n) {
 880   transition(vtos, ltos);
 881 #ifdef AARCH64
 882   __ ldr(R0_tos, laddress(n));
 883 #else
 884   __ ldr(R0_tos_lo, laddress(n));
 885   __ ldr(R1_tos_hi, haddress(n));
 886 #endif // AARCH64
 887 }
 888 
 889 
 890 void TemplateTable::fload(int n) {
 891   transition(vtos, ftos);
 892 #ifdef __SOFTFP__
 893   __ ldr(R0_tos, faddress(n));
 894 #else
 895   __ ldr_float(S0_tos, faddress(n));
 896 #endif // __SOFTFP__
 897 }
 898 
 899 
 900 void TemplateTable::dload(int n) {
 901   transition(vtos, dtos);
 902 #ifdef __SOFTFP__
 903   __ ldr(R0_tos_lo, laddress(n));
 904   __ ldr(R1_tos_hi, haddress(n));
 905 #else
 906   __ ldr_double(D0_tos, daddress(n));
 907 #endif // __SOFTFP__
 908 }
 909 
 910 
 911 void TemplateTable::aload(int n) {
 912   transition(vtos, atos);
 913   __ ldr(R0_tos, aaddress(n));
 914 }
 915 
 916 void TemplateTable::aload_0() {
 917   aload_0_internal();
 918 }
 919 
 920 void TemplateTable::nofast_aload_0() {
 921   aload_0_internal(may_not_rewrite);
 922 }
 923 
 924 void TemplateTable::aload_0_internal(RewriteControl rc) {
 925   transition(vtos, atos);
 926   // According to bytecode histograms, the pairs:
 927   //
 928   // _aload_0, _fast_igetfield
 929   // _aload_0, _fast_agetfield
 930   // _aload_0, _fast_fgetfield
 931   //
 932   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 933   // bytecode checks if the next bytecode is either _fast_igetfield,
 934   // _fast_agetfield or _fast_fgetfield and then rewrites the
 935   // current bytecode into a pair bytecode; otherwise it rewrites the current
 936   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 937   //
 938   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 939   //       otherwise we may miss an opportunity for a pair.
 940   //
 941   // Also rewrite frequent pairs
 942   //   aload_0, aload_1
 943   //   aload_0, iload_1
 944   // These bytecodes, which need only a small amount of code, are the most profitable to rewrite.
 945   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 946     Label rewrite, done;
 947     const Register next_bytecode = R1_tmp;
 948     const Register target_bytecode = R2_tmp;
 949 
 950     // get next byte
 951     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 952 
 953     // if _getfield then wait with rewrite
 954     __ cmp(next_bytecode, Bytecodes::_getfield);
 955     __ b(done, eq);
 956 
 957     // if _igetfield then rewrite to _fast_iaccess_0
 958     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 959     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
 960     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
 961     __ b(rewrite, eq);
 962 
 963     // if _agetfield then rewrite to _fast_aaccess_0
 964     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 965     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
 966     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
 967     __ b(rewrite, eq);
 968 
 969     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
 970     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 971     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 972 
 973     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
 974 #ifdef AARCH64
 975     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
 976     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
 977     __ mov(target_bytecode, Rtemp, eq);
 978 #else
 979     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
 980     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
 981 #endif // AARCH64
 982 
 983     // rewrite
 984     __ bind(rewrite);
 985     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
 986 
 987     __ bind(done);
 988   }
 989 
 990   aload(0);
 991 }
 992 
 993 void TemplateTable::istore() {
 994   transition(itos, vtos);
 995   const Register Rlocal_index = R2_tmp;
 996 
 997   locals_index(Rlocal_index);
 998   Address local = load_iaddress(Rlocal_index, Rtemp);
 999   __ str_32(R0_tos, local);
1000 }
1001 
1002 
1003 void TemplateTable::lstore() {
1004   transition(ltos, vtos);
1005   const Register Rlocal_index = R2_tmp;
1006 
1007   locals_index(Rlocal_index);
1008   store_category2_local(Rlocal_index, R3_tmp);
1009 }
1010 
1011 
1012 void TemplateTable::fstore() {
1013   transition(ftos, vtos);
1014   const Register Rlocal_index = R2_tmp;
1015 
1016   locals_index(Rlocal_index);
1017   Address local = load_faddress(Rlocal_index, Rtemp);
1018 #ifdef __SOFTFP__
1019   __ str(R0_tos, local);
1020 #else
1021   __ str_float(S0_tos, local);
1022 #endif // __SOFTFP__
1023 }
1024 
1025 
1026 void TemplateTable::dstore() {
1027   transition(dtos, vtos);
1028   const Register Rlocal_index = R2_tmp;
1029 
1030   locals_index(Rlocal_index);
1031 
1032 #ifdef __SOFTFP__
1033   store_category2_local(Rlocal_index, R3_tmp);
1034 #else
1035   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1036 #endif // __SOFTFP__
1037 }
1038 
1039 
1040 void TemplateTable::astore() {
1041   transition(vtos, vtos);
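       // Note: astore uses vtos and a raw pop_ptr because it must also accept
       // returnAddress values (from jsr), not just object references.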
1042   const Register Rlocal_index = R1_tmp;
1043 
1044   __ pop_ptr(R0_tos);
1045   locals_index(Rlocal_index);
1046   Address local = load_aaddress(Rlocal_index, Rtemp);
1047   __ str(R0_tos, local);
1048 }
1049 
1050 
1051 void TemplateTable::wide_istore() {
1052   transition(vtos, vtos);
1053   const Register Rlocal_index = R2_tmp;
1054 
1055   __ pop_i(R0_tos);
1056   locals_index_wide(Rlocal_index);
1057   Address local = load_iaddress(Rlocal_index, Rtemp);
1058   __ str_32(R0_tos, local);
1059 }
1060 
1061 
1062 void TemplateTable::wide_lstore() {
1063   transition(vtos, vtos);
1064   const Register Rlocal_index = R2_tmp;
1065   const Register Rlocal_base = R3_tmp;
1066 
1067 #ifdef AARCH64
1068   __ pop_l(R0_tos);
1069 #else
1070   __ pop_l(R0_tos_lo, R1_tos_hi);
1071 #endif // AARCH64
1072 
1073   locals_index_wide(Rlocal_index);
1074   store_category2_local(Rlocal_index, R3_tmp);
1075 }
1076 
1077 
1078 void TemplateTable::wide_fstore() {
1079   wide_istore();
1080 }
1081 
1082 
1083 void TemplateTable::wide_dstore() {
1084   wide_lstore();
1085 }
1086 
1087 
1088 void TemplateTable::wide_astore() {
1089   transition(vtos, vtos);
1090   const Register Rlocal_index = R2_tmp;
1091 
1092   __ pop_ptr(R0_tos);
1093   locals_index_wide(Rlocal_index);
1094   Address local = load_aaddress(Rlocal_index, Rtemp);
1095   __ str(R0_tos, local);
1096 }
1097 
1098 
1099 void TemplateTable::iastore() {
1100   transition(itos, vtos);
1101   const Register Rindex = R4_tmp; // index_check prefers index in R4
1102   const Register Rarray = R3_tmp;
1103   // R0_tos: value
1104 
1105   __ pop_i(Rindex);
1106   index_check(Rarray, Rindex);
1107   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1108 }
1109 
1110 
1111 void TemplateTable::lastore() {
1112   transition(ltos, vtos);
1113   const Register Rindex = R4_tmp; // index_check prefers index in R4
1114   const Register Rarray = R3_tmp;
1115   // R0_tos_lo:R1_tos_hi: value
1116 
1117   __ pop_i(Rindex);
1118   index_check(Rarray, Rindex);
1119 
1120 #ifdef AARCH64
1121   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1122 #else
1123   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1124   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1125   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1126 #endif // AARCH64
1127 }
1128 
1129 
1130 void TemplateTable::fastore() {
1131   transition(ftos, vtos);
1132   const Register Rindex = R4_tmp; // index_check prefers index in R4
1133   const Register Rarray = R3_tmp;
1134   // S0_tos/R0_tos: value
1135 
1136   __ pop_i(Rindex);
1137   index_check(Rarray, Rindex);
1138   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1139 
1140 #ifdef __SOFTFP__
1141   __ str(R0_tos, addr);
1142 #else
1143   __ str_float(S0_tos, addr);
1144 #endif // __SOFTFP__
1145 }
1146 
1147 
1148 void TemplateTable::dastore() {
1149   transition(dtos, vtos);
1150   const Register Rindex = R4_tmp; // index_check prefers index in R4
1151   const Register Rarray = R3_tmp;
1152   // D0_tos / R0_tos_lo:R1_tos_hi: value
1153 
1154   __ pop_i(Rindex);
1155   index_check(Rarray, Rindex);
1156 
1157 #ifdef __SOFTFP__
1158   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1159   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1160   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1161 #else
1162   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1163 #endif // __SOFTFP__
1164 }
1165 
1166 
1167 void TemplateTable::aastore() {
1168   transition(vtos, vtos);
1169   Label is_null, throw_array_store, done;
1170 
1171   const Register Raddr_1   = R1_tmp;
1172   const Register Rvalue_2  = R2_tmp;
1173   const Register Rarray_3  = R3_tmp;
1174   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1175   const Register Rsub_5    = R5_tmp;
1176   const Register Rsuper_LR = LR_tmp;
1177 
1178   // stack: ..., array, index, value
1179   __ ldr(Rvalue_2, at_tos());     // Value
1180   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1181   __ ldr(Rarray_3, at_tos_p2());  // Array
1182 
1183   index_check_without_pop(Rarray_3, Rindex_4);
1184 
1185   // Compute the array base
1186   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1187 
1188   // do array store check - check for NULL value first
1189   __ cbz(Rvalue_2, is_null);
1190 
1191   // Load subklass
1192   __ load_klass(Rsub_5, Rvalue_2);
1193   // Load superklass
1194   __ load_klass(Rtemp, Rarray_3);
1195   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1196 
1197   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1198   // Come here on success
1199 
1200   // Store value
1201   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
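       // Raddr_1 now points at the element slot array[index].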
1202 
1203   // Now store using the appropriate barrier
1204   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
1205   __ b(done);
1206 
1207   __ bind(throw_array_store);
1208 
1209   // Come here on failure of subtype check
1210   __ profile_typecheck_failed(R0_tmp);
1211 
1212   // object is at TOS
1213   __ b(Interpreter::_throw_ArrayStoreException_entry);
1214 
1215   // Have a NULL in Rvalue_2, store NULL at array[index].
1216   __ bind(is_null);
1217   __ profile_null_seen(R0_tmp);
1218 
1219   // Store a NULL
1220   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
1221 
1222   // Pop stack arguments
1223   __ bind(done);
1224   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1225 }
1226 
1227 
1228 void TemplateTable::bastore() {
1229   transition(itos, vtos);
1230   const Register Rindex = R4_tmp; // index_check prefers index in R4
1231   const Register Rarray = R3_tmp;
1232   // R0_tos: value
1233 
1234   __ pop_i(Rindex);
1235   index_check(Rarray, Rindex);
1236 
1237   // Need to check whether array is boolean or byte
1238   // since both types share the bastore bytecode.
1239   __ load_klass(Rtemp, Rarray);
1240   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1241   Label L_skip;
1242   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1243   __ b(L_skip, eq);
1244   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1245   __ bind(L_skip);
1246   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1247 }
1248 
1249 
1250 void TemplateTable::castore() {
1251   transition(itos, vtos);
1252   const Register Rindex = R4_tmp; // index_check prefers index in R4
1253   const Register Rarray = R3_tmp;
1254   // R0_tos: value
1255 
1256   __ pop_i(Rindex);
1257   index_check(Rarray, Rindex);
1258 
1259   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1260 }
1261 
1262 
1263 void TemplateTable::sastore() {
1264   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1265            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1266          "base offsets for char and short should be equal");
1267   castore();
1268 }
1269 
1270 
1271 void TemplateTable::istore(int n) {
1272   transition(itos, vtos);
1273   __ str_32(R0_tos, iaddress(n));
1274 }
1275 
1276 
1277 void TemplateTable::lstore(int n) {
1278   transition(ltos, vtos);
1279 #ifdef AARCH64
1280   __ str(R0_tos, laddress(n));
1281 #else
1282   __ str(R0_tos_lo, laddress(n));
1283   __ str(R1_tos_hi, haddress(n));
1284 #endif // AARCH64
1285 }
1286 
1287 
1288 void TemplateTable::fstore(int n) {
1289   transition(ftos, vtos);
1290 #ifdef __SOFTFP__
1291   __ str(R0_tos, faddress(n));
1292 #else
1293   __ str_float(S0_tos, faddress(n));
1294 #endif // __SOFTFP__
1295 }
1296 
1297 
1298 void TemplateTable::dstore(int n) {
1299   transition(dtos, vtos);
1300 #ifdef __SOFTFP__
1301   __ str(R0_tos_lo, laddress(n));
1302   __ str(R1_tos_hi, haddress(n));
1303 #else
1304   __ str_double(D0_tos, daddress(n));
1305 #endif // __SOFTFP__
1306 }
1307 
1308 
1309 void TemplateTable::astore(int n) {
1310   transition(vtos, vtos);
1311   __ pop_ptr(R0_tos);
1312   __ str(R0_tos, aaddress(n));
1313 }
1314 
1315 
1316 void TemplateTable::pop() {
1317   transition(vtos, vtos);
1318   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1319 }
1320 
1321 
1322 void TemplateTable::pop2() {
1323   transition(vtos, vtos);
1324   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1325 }
1326 
1327 
1328 void TemplateTable::dup() {
1329   transition(vtos, vtos);
1330   // stack: ..., a
1331   __ load_ptr(0, R0_tmp);
1332   __ push_ptr(R0_tmp);
1333   // stack: ..., a, a
1334 }
1335 
1336 
1337 void TemplateTable::dup_x1() {
1338   transition(vtos, vtos);
1339   // stack: ..., a, b
1340   __ load_ptr(0, R0_tmp);  // load b
1341   __ load_ptr(1, R2_tmp);  // load a
1342   __ store_ptr(1, R0_tmp); // store b
1343   __ store_ptr(0, R2_tmp); // store a
1344   __ push_ptr(R0_tmp);     // push b
1345   // stack: ..., b, a, b
1346 }
1347 
1348 
1349 void TemplateTable::dup_x2() {
1350   transition(vtos, vtos);
1351   // stack: ..., a, b, c
1352   __ load_ptr(0, R0_tmp);   // load c
1353   __ load_ptr(1, R2_tmp);   // load b
1354   __ load_ptr(2, R4_tmp);   // load a
1355 
1356   __ push_ptr(R0_tmp);      // push c
1357 
1358   // stack: ..., a, b, c, c
1359   __ store_ptr(1, R2_tmp);  // store b
1360   __ store_ptr(2, R4_tmp);  // store a
1361   __ store_ptr(3, R0_tmp);  // store c
1362   // stack: ..., c, a, b, c
1363 }
1364 
1365 
1366 void TemplateTable::dup2() {
1367   transition(vtos, vtos);
1368   // stack: ..., a, b
1369   __ load_ptr(1, R0_tmp);  // load a
1370   __ push_ptr(R0_tmp);     // push a
1371   __ load_ptr(1, R0_tmp);  // load b
1372   __ push_ptr(R0_tmp);     // push b
1373   // stack: ..., a, b, a, b
1374 }
1375 
1376 
1377 void TemplateTable::dup2_x1() {
1378   transition(vtos, vtos);
1379 
1380   // stack: ..., a, b, c
1381   __ load_ptr(0, R4_tmp);  // load c
1382   __ load_ptr(1, R2_tmp);  // load b
1383   __ load_ptr(2, R0_tmp);  // load a
1384 
1385   __ push_ptr(R2_tmp);     // push b
1386   __ push_ptr(R4_tmp);     // push c
1387 
1388   // stack: ..., a, b, c, b, c
1389 
1390   __ store_ptr(2, R0_tmp);  // store a
1391   __ store_ptr(3, R4_tmp);  // store c
1392   __ store_ptr(4, R2_tmp);  // store b
1393 
1394   // stack: ..., b, c, a, b, c
1395 }
1396 
1397 
1398 void TemplateTable::dup2_x2() {
1399   transition(vtos, vtos);
1400   // stack: ..., a, b, c, d
1401   __ load_ptr(0, R0_tmp);  // load d
1402   __ load_ptr(1, R2_tmp);  // load c
1403   __ push_ptr(R2_tmp);     // push c
1404   __ push_ptr(R0_tmp);     // push d
1405   // stack: ..., a, b, c, d, c, d
1406   __ load_ptr(4, R4_tmp);  // load b
1407   __ store_ptr(4, R0_tmp); // store d in b
1408   __ store_ptr(2, R4_tmp); // store b in d
1409   // stack: ..., a, d, c, b, c, d
1410   __ load_ptr(5, R4_tmp);  // load a
1411   __ store_ptr(5, R2_tmp); // store c in a
1412   __ store_ptr(3, R4_tmp); // store a in c
1413   // stack: ..., c, d, a, b, c, d
1414 }
1415 
1416 
1417 void TemplateTable::swap() {
1418   transition(vtos, vtos);
1419   // stack: ..., a, b
1420   __ load_ptr(1, R0_tmp);  // load a
1421   __ load_ptr(0, R2_tmp);  // load b
1422   __ store_ptr(0, R0_tmp); // store a in b
1423   __ store_ptr(1, R2_tmp); // store b in a
1424   // stack: ..., b, a
1425 }
1426 
1427 
1428 void TemplateTable::iop2(Operation op) {
1429   transition(itos, itos);
1430   const Register arg1 = R1_tmp;
1431   const Register arg2 = R0_tos;
1432 
1433   __ pop_i(arg1);
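       // For the shift operations only the low five bits of the count are used
       // (JVM spec), hence the explicit mask with 0x1f on the 32-bit ARM path.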
1434   switch (op) {
1435     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1436     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1437     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1438     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1439     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1440     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1441 #ifdef AARCH64
1442     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1443     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1444     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1445 #else
1446     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1447     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1448     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1449 #endif // AARCH64
1450     default   : ShouldNotReachHere();
1451   }
1452 }
1453 
1454 
1455 void TemplateTable::lop2(Operation op) {
1456   transition(ltos, ltos);
1457 #ifdef AARCH64
1458   const Register arg1 = R1_tmp;
1459   const Register arg2 = R0_tos;
1460 
1461   __ pop_l(arg1);
1462   switch (op) {
1463     case add  : __ add (R0_tos, arg1, arg2); break;
1464     case sub  : __ sub (R0_tos, arg1, arg2); break;
1465     case _and : __ andr(R0_tos, arg1, arg2); break;
1466     case _or  : __ orr (R0_tos, arg1, arg2); break;
1467     case _xor : __ eor (R0_tos, arg1, arg2); break;
1468     default   : ShouldNotReachHere();
1469   }
1470 #else
1471   const Register arg1_lo = R2_tmp;
1472   const Register arg1_hi = R3_tmp;
1473   const Register arg2_lo = R0_tos_lo;
1474   const Register arg2_hi = R1_tos_hi;
1475 
1476   __ pop_l(arg1_lo, arg1_hi);
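       // 64-bit add/sub are performed in two halves, propagating the carry/borrow
       // from the low word into the high word (adds/adc, subs/sbc).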
1477   switch (op) {
1478     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1479     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1480     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1481     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1482     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1483     default : ShouldNotReachHere();
1484   }
1485 #endif // AARCH64
1486 }
1487 
1488 
1489 void TemplateTable::idiv() {
1490   transition(itos, itos);
1491 #ifdef AARCH64
1492   const Register divisor = R0_tos;
1493   const Register dividend = R1_tmp;
1494 
1495   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1496   __ pop_i(dividend);
1497   __ sdiv_w(R0_tos, dividend, divisor);
1498 #else
1499   __ mov(R2, R0_tos);
1500   __ pop_i(R0);
1501   // R0 - dividend
1502   // R2 - divisor
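       // The shared stub returns the quotient in R1 and the remainder in R0; it is
       // also expected to handle the divide-by-zero case (there is no explicit
       // zero check on this path).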
1503   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1504   // R1 - result
1505   __ mov(R0_tos, R1);
1506 #endif // AARCH64
1507 }
1508 
1509 
1510 void TemplateTable::irem() {
1511   transition(itos, itos);
1512 #ifdef AARCH64
1513   const Register divisor = R0_tos;
1514   const Register dividend = R1_tmp;
1515   const Register quotient = R2_tmp;
1516 
1517   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1518   __ pop_i(dividend);
1519   __ sdiv_w(quotient, dividend, divisor);
1520   __ msub_w(R0_tos, divisor, quotient, dividend);
1521 #else
1522   __ mov(R2, R0_tos);
1523   __ pop_i(R0);
1524   // R0 - dividend
1525   // R2 - divisor
1526   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1527   // R0 - remainder
1528 #endif // AARCH64
1529 }
1530 
1531 
1532 void TemplateTable::lmul() {
1533   transition(ltos, ltos);
1534 #ifdef AARCH64
1535   const Register arg1 = R0_tos;
1536   const Register arg2 = R1_tmp;
1537 
1538   __ pop_l(arg2);
1539   __ mul(R0_tos, arg1, arg2);
1540 #else
1541   const Register arg1_lo = R0_tos_lo;
1542   const Register arg1_hi = R1_tos_hi;
1543   const Register arg2_lo = R2_tmp;
1544   const Register arg2_hi = R3_tmp;
1545 
1546   __ pop_l(arg2_lo, arg2_hi);
1547 
1548   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1549 #endif // AARCH64
1550 }
1551 
1552 
1553 void TemplateTable::ldiv() {
1554   transition(ltos, ltos);
1555 #ifdef AARCH64
1556   const Register divisor = R0_tos;
1557   const Register dividend = R1_tmp;
1558 
1559   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1560   __ pop_l(dividend);
1561   __ sdiv(R0_tos, dividend, divisor);
1562 #else
1563   const Register x_lo = R2_tmp;
1564   const Register x_hi = R3_tmp;
1565   const Register y_lo = R0_tos_lo;
1566   const Register y_hi = R1_tos_hi;
1567 
1568   __ pop_l(x_lo, x_hi);
1569 
1570   // check if y = 0
1571   __ orrs(Rtemp, y_lo, y_hi);
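       // Z is set only if both halves of the divisor are zero, i.e. the full
       // 64-bit divisor is 0, in which case ArithmeticException is thrown.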
1572   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1573   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1574 #endif // AARCH64
1575 }
1576 
1577 
1578 void TemplateTable::lrem() {
1579   transition(ltos, ltos);
1580 #ifdef AARCH64
1581   const Register divisor = R0_tos;
1582   const Register dividend = R1_tmp;
1583   const Register quotient = R2_tmp;
1584 
1585   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1586   __ pop_l(dividend);
1587   __ sdiv(quotient, dividend, divisor);
1588   __ msub(R0_tos, divisor, quotient, dividend);
1589 #else
1590   const Register x_lo = R2_tmp;
1591   const Register x_hi = R3_tmp;
1592   const Register y_lo = R0_tos_lo;
1593   const Register y_hi = R1_tos_hi;
1594 
1595   __ pop_l(x_lo, x_hi);
1596 
1597   // check if y = 0
1598   __ orrs(Rtemp, y_lo, y_hi);
1599   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1600   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1601 #endif // AARCH64
1602 }
1603 
1604 
1605 void TemplateTable::lshl() {
1606   transition(itos, ltos);
1607 #ifdef AARCH64
1608   const Register val = R1_tmp;
1609   const Register shift_cnt = R0_tos;
1610   __ pop_l(val);
1611   __ lslv(R0_tos, val, shift_cnt);
1612 #else
1613   const Register shift_cnt = R4_tmp;
1614   const Register val_lo = R2_tmp;
1615   const Register val_hi = R3_tmp;
1616 
1617   __ pop_l(val_lo, val_hi);
1618   __ andr(shift_cnt, R0_tos, 63);
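       // Long shifts use only the low six bits of the count (JVM spec).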
1619   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1620 #endif // AARCH64
1621 }
1622 
1623 
1624 void TemplateTable::lshr() {
1625   transition(itos, ltos);
1626 #ifdef AARCH64
1627   const Register val = R1_tmp;
1628   const Register shift_cnt = R0_tos;
1629   __ pop_l(val);
1630   __ asrv(R0_tos, val, shift_cnt);
1631 #else
1632   const Register shift_cnt = R4_tmp;
1633   const Register val_lo = R2_tmp;
1634   const Register val_hi = R3_tmp;
1635 
1636   __ pop_l(val_lo, val_hi);
1637   __ andr(shift_cnt, R0_tos, 63);
1638   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1639 #endif // AARCH64
1640 }
1641 
1642 
1643 void TemplateTable::lushr() {
1644   transition(itos, ltos);
1645 #ifdef AARCH64
1646   const Register val = R1_tmp;
1647   const Register shift_cnt = R0_tos;
1648   __ pop_l(val);
1649   __ lsrv(R0_tos, val, shift_cnt);
1650 #else
1651   const Register shift_cnt = R4_tmp;
1652   const Register val_lo = R2_tmp;
1653   const Register val_hi = R3_tmp;
1654 
1655   __ pop_l(val_lo, val_hi);
1656   __ andr(shift_cnt, R0_tos, 63);
1657   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1658 #endif // AARCH64
1659 }
1660 
1661 
1662 void TemplateTable::fop2(Operation op) {
1663   transition(ftos, ftos);
1664 #ifdef __SOFTFP__
1665   __ mov(R1, R0_tos);
1666   __ pop_i(R0);
1667   switch (op) {
1668     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1669     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1670     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1671     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1672     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1673     default : ShouldNotReachHere();
1674   }
1675 #else
1676   const FloatRegister arg1 = S1_tmp;
1677   const FloatRegister arg2 = S0_tos;
1678 
1679   switch (op) {
1680     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1681     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1682     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1683     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1684     case rem:
1685 #ifndef __ABI_HARD__
1686       __ pop_f(arg1);
1687       __ fmrs(R0, arg1);
1688       __ fmrs(R1, arg2);
1689       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1690       __ fmsr(S0_tos, R0);
1691 #else
1692       __ mov_float(S1_reg, arg2);
1693       __ pop_f(S0);
1694       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1695 #endif // !__ABI_HARD__
1696       break;
1697     default : ShouldNotReachHere();
1698   }
1699 #endif // __SOFTFP__
1700 }
1701 
1702 
1703 void TemplateTable::dop2(Operation op) {
1704   transition(dtos, dtos);
1705 #ifdef __SOFTFP__
1706   __ mov(R2, R0_tos_lo);
1707   __ mov(R3, R1_tos_hi);
1708   __ pop_l(R0, R1);
1709   switch (op) {
1710     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1711     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1712     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1713     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1714     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1715     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1716     default : ShouldNotReachHere();
1717   }
1718 #else
1719   const FloatRegister arg1 = D1_tmp;
1720   const FloatRegister arg2 = D0_tos;
1721 
1722   switch (op) {
1723     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1724     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1725     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1726     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1727     case rem:
1728 #ifndef __ABI_HARD__
1729       __ pop_d(arg1);
1730       __ fmrrd(R0, R1, arg1);
1731       __ fmrrd(R2, R3, arg2);
1732       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1733       __ fmdrr(D0_tos, R0, R1);
1734 #else
1735       __ mov_double(D1, arg2);
1736       __ pop_d(D0);
1737       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1738 #endif // !__ABI_HARD__
1739       break;
1740     default : ShouldNotReachHere();
1741   }
1742 #endif // __SOFTFP__
1743 }
1744 
1745 
1746 void TemplateTable::ineg() {
1747   transition(itos, itos);
1748   __ neg_32(R0_tos, R0_tos);
1749 }
1750 
1751 
1752 void TemplateTable::lneg() {
1753   transition(ltos, ltos);
1754 #ifdef AARCH64
1755   __ neg(R0_tos, R0_tos);
1756 #else
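  // 64-bit negation in two steps: lo = 0 - lo (setting the borrow),
  // then hi = 0 - hi - borrow.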
1757   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1758   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1759 #endif // AARCH64
1760 }
1761 
1762 
1763 void TemplateTable::fneg() {
1764   transition(ftos, ftos);
1765 #ifdef __SOFTFP__
1766   // Invert sign bit
1767   const int sign_mask = 0x80000000;
1768   __ eor(R0_tos, R0_tos, sign_mask);
1769 #else
1770   __ neg_float(S0_tos, S0_tos);
1771 #endif // __SOFTFP__
1772 }
1773 
1774 
1775 void TemplateTable::dneg() {
1776   transition(dtos, dtos);
1777 #ifdef __SOFTFP__
1778   // Invert sign bit in the high part of the double
1779   const int sign_mask_hi = 0x80000000;
1780   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1781 #else
1782   __ neg_double(D0_tos, D0_tos);
1783 #endif // __SOFTFP__
1784 }
1785 
1786 
1787 void TemplateTable::iinc() {
1788   transition(vtos, vtos);
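  // iinc: local[index] += (signed 8-bit constant at bcp + 2)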
1789   const Register Rconst = R2_tmp;
1790   const Register Rlocal_index = R1_tmp;
1791   const Register Rval = R0_tmp;
1792 
1793   __ ldrsb(Rconst, at_bcp(2));
1794   locals_index(Rlocal_index);
1795   Address local = load_iaddress(Rlocal_index, Rtemp);
1796   __ ldr_s32(Rval, local);
1797   __ add(Rval, Rval, Rconst);
1798   __ str_32(Rval, local);
1799 }
1800 
1801 
1802 void TemplateTable::wide_iinc() {
1803   transition(vtos, vtos);
1804   const Register Rconst = R2_tmp;
1805   const Register Rlocal_index = R1_tmp;
1806   const Register Rval = R0_tmp;
1807 
1808   // get constant in Rconst
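  // (a big-endian signed 16-bit value assembled from bcp[4] and bcp[5])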
1809   __ ldrsb(R2_tmp, at_bcp(4));
1810   __ ldrb(R3_tmp, at_bcp(5));
1811   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1812 
1813   locals_index_wide(Rlocal_index);
1814   Address local = load_iaddress(Rlocal_index, Rtemp);
1815   __ ldr_s32(Rval, local);
1816   __ add(Rval, Rval, Rconst);
1817   __ str_32(Rval, local);
1818 }
1819 
1820 
1821 void TemplateTable::convert() {
1822   // Checking
1823 #ifdef ASSERT
1824   { TosState tos_in  = ilgl;
1825     TosState tos_out = ilgl;
1826     switch (bytecode()) {
1827       case Bytecodes::_i2l: // fall through
1828       case Bytecodes::_i2f: // fall through
1829       case Bytecodes::_i2d: // fall through
1830       case Bytecodes::_i2b: // fall through
1831       case Bytecodes::_i2c: // fall through
1832       case Bytecodes::_i2s: tos_in = itos; break;
1833       case Bytecodes::_l2i: // fall through
1834       case Bytecodes::_l2f: // fall through
1835       case Bytecodes::_l2d: tos_in = ltos; break;
1836       case Bytecodes::_f2i: // fall through
1837       case Bytecodes::_f2l: // fall through
1838       case Bytecodes::_f2d: tos_in = ftos; break;
1839       case Bytecodes::_d2i: // fall through
1840       case Bytecodes::_d2l: // fall through
1841       case Bytecodes::_d2f: tos_in = dtos; break;
1842       default             : ShouldNotReachHere();
1843     }
1844     switch (bytecode()) {
1845       case Bytecodes::_l2i: // fall through
1846       case Bytecodes::_f2i: // fall through
1847       case Bytecodes::_d2i: // fall through
1848       case Bytecodes::_i2b: // fall through
1849       case Bytecodes::_i2c: // fall through
1850       case Bytecodes::_i2s: tos_out = itos; break;
1851       case Bytecodes::_i2l: // fall through
1852       case Bytecodes::_f2l: // fall through
1853       case Bytecodes::_d2l: tos_out = ltos; break;
1854       case Bytecodes::_i2f: // fall through
1855       case Bytecodes::_l2f: // fall through
1856       case Bytecodes::_d2f: tos_out = ftos; break;
1857       case Bytecodes::_i2d: // fall through
1858       case Bytecodes::_l2d: // fall through
1859       case Bytecodes::_f2d: tos_out = dtos; break;
1860       default             : ShouldNotReachHere();
1861     }
1862     transition(tos_in, tos_out);
1863   }
1864 #endif // ASSERT
1865 
1866   // Conversion
1867   switch (bytecode()) {
1868     case Bytecodes::_i2l:
1869 #ifdef AARCH64
1870       __ sign_extend(R0_tos, R0_tos, 32);
1871 #else
1872       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1873 #endif // AARCH64
1874       break;
1875 
1876     case Bytecodes::_i2f:
1877 #ifdef AARCH64
1878       __ scvtf_sw(S0_tos, R0_tos);
1879 #else
1880 #ifdef __SOFTFP__
1881       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1882 #else
1883       __ fmsr(S0_tmp, R0_tos);
1884       __ fsitos(S0_tos, S0_tmp);
1885 #endif // __SOFTFP__
1886 #endif // AARCH64
1887       break;
1888 
1889     case Bytecodes::_i2d:
1890 #ifdef AARCH64
1891       __ scvtf_dw(D0_tos, R0_tos);
1892 #else
1893 #ifdef __SOFTFP__
1894       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1895 #else
1896       __ fmsr(S0_tmp, R0_tos);
1897       __ fsitod(D0_tos, S0_tmp);
1898 #endif // __SOFTFP__
1899 #endif // AARCH64
1900       break;
1901 
1902     case Bytecodes::_i2b:
1903       __ sign_extend(R0_tos, R0_tos, 8);
1904       break;
1905 
1906     case Bytecodes::_i2c:
1907       __ zero_extend(R0_tos, R0_tos, 16);
1908       break;
1909 
1910     case Bytecodes::_i2s:
1911       __ sign_extend(R0_tos, R0_tos, 16);
1912       break;
1913 
1914     case Bytecodes::_l2i:
1915       /* nothing to do */
1916       break;
1917 
1918     case Bytecodes::_l2f:
1919 #ifdef AARCH64
1920       __ scvtf_sx(S0_tos, R0_tos);
1921 #else
1922       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1923 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1924       __ fmsr(S0_tos, R0);
1925 #endif // !__SOFTFP__ && !__ABI_HARD__
1926 #endif // AARCH64
1927       break;
1928 
1929     case Bytecodes::_l2d:
1930 #ifdef AARCH64
1931       __ scvtf_dx(D0_tos, R0_tos);
1932 #else
1933       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1934 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1935       __ fmdrr(D0_tos, R0, R1);
1936 #endif // !__SOFTFP__ && !__ABI_HARD__
1937 #endif // AARCH64
1938       break;
1939 
1940     case Bytecodes::_f2i:
1941 #ifdef AARCH64
1942       __ fcvtzs_ws(R0_tos, S0_tos);
1943 #else
1944 #ifndef __SOFTFP__
1945       __ ftosizs(S0_tos, S0_tos);
1946       __ fmrs(R0_tos, S0_tos);
1947 #else
1948       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1949 #endif // !__SOFTFP__
1950 #endif // AARCH64
1951       break;
1952 
1953     case Bytecodes::_f2l:
1954 #ifdef AARCH64
1955       __ fcvtzs_xs(R0_tos, S0_tos);
1956 #else
1957 #ifndef __SOFTFP__
1958       __ fmrs(R0_tos, S0_tos);
1959 #endif // !__SOFTFP__
1960       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1961 #endif // AARCH64
1962       break;
1963 
1964     case Bytecodes::_f2d:
1965 #ifdef __SOFTFP__
1966       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1967 #else
1968       __ convert_f2d(D0_tos, S0_tos);
1969 #endif // __SOFTFP__
1970       break;
1971 
1972     case Bytecodes::_d2i:
1973 #ifdef AARCH64
1974       __ fcvtzs_wd(R0_tos, D0_tos);
1975 #else
1976 #ifndef __SOFTFP__
1977       __ ftosizd(Stemp, D0);
1978       __ fmrs(R0, Stemp);
1979 #else
1980       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1981 #endif // !__SOFTFP__
1982 #endif // AARCH64
1983       break;
1984 
1985     case Bytecodes::_d2l:
1986 #ifdef AARCH64
1987       __ fcvtzs_xd(R0_tos, D0_tos);
1988 #else
1989 #ifndef __SOFTFP__
1990       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1991 #endif // !__SOFTFP__
1992       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1993 #endif // AARCH64
1994       break;
1995 
1996     case Bytecodes::_d2f:
1997 #ifdef __SOFTFP__
1998       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1999 #else
2000       __ convert_d2f(S0_tos, D0_tos);
2001 #endif // __SOFTFP__
2002       break;
2003 
2004     default:
2005       ShouldNotReachHere();
2006   }
2007 }
2008 
2009 
2010 void TemplateTable::lcmp() {
2011   transition(ltos, itos);
2012 #ifdef AARCH64
2013   const Register arg1 = R1_tmp;
2014   const Register arg2 = R0_tos;
2015 
2016   __ pop_l(arg1);
2017 
2018   __ cmp(arg1, arg2);
2019   __ cset(R0_tos, gt);               // 1 if '>', else 0
2020   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2021 #else
2022   const Register arg1_lo = R2_tmp;
2023   const Register arg1_hi = R3_tmp;
2024   const Register arg2_lo = R0_tos_lo;
2025   const Register arg2_hi = R1_tos_hi;
2026   const Register res = R4_tmp;
2027 
2028   __ pop_l(arg1_lo, arg1_hi);
2029 
2030   // long compare arg1 with arg2
2031   // result is -1/0/+1 if '<'/'='/'>'
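  // Roughly:
  //   if (hi1 != hi2)      res = (hi1 < hi2) ? -1 : 1;   // signed compare
  //   else if (lo1 != lo2) res = (lo1 < lo2) ? -1 : 1;   // unsigned compare
  //   else                 res = 0;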
2032   Label done;
2033 
2034   __ mov (res, 0);
2035   __ cmp (arg1_hi, arg2_hi);
2036   __ mvn (res, 0, lt);
2037   __ mov (res, 1, gt);
2038   __ b(done, ne);
2039   __ cmp (arg1_lo, arg2_lo);
2040   __ mvn (res, 0, lo);
2041   __ mov (res, 1, hi);
2042   __ bind(done);
2043   __ mov (R0_tos, res);
2044 #endif // AARCH64
2045 }
2046 
2047 
2048 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2049   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
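  // unordered_result == 1 implements fcmpg/dcmpg (NaN compares as greater),
  // unordered_result == -1 implements fcmpl/dcmpl (NaN compares as less).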
2050 
2051 #ifdef AARCH64
2052   if (is_float) {
2053     transition(ftos, itos);
2054     __ pop_f(S1_tmp);
2055     __ fcmp_s(S1_tmp, S0_tos);
2056   } else {
2057     transition(dtos, itos);
2058     __ pop_d(D1_tmp);
2059     __ fcmp_d(D1_tmp, D0_tos);
2060   }
2061 
2062   if (unordered_result < 0) {
2063     __ cset(R0_tos, gt);               // 1 if '>', else 0
2064     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2065   } else {
2066     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2067     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2068   }
2069 
2070 #else
2071 
2072 #ifdef __SOFTFP__
2073 
2074   if (is_float) {
2075     transition(ftos, itos);
2076     const Register Rx = R0;
2077     const Register Ry = R1;
2078 
2079     __ mov(Ry, R0_tos);
2080     __ pop_i(Rx);
2081 
2082     if (unordered_result == 1) {
2083       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2084     } else {
2085       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2086     }
2087 
2088   } else {
2089 
2090     transition(dtos, itos);
2091     const Register Rx_lo = R0;
2092     const Register Rx_hi = R1;
2093     const Register Ry_lo = R2;
2094     const Register Ry_hi = R3;
2095 
2096     __ mov(Ry_lo, R0_tos_lo);
2097     __ mov(Ry_hi, R1_tos_hi);
2098     __ pop_l(Rx_lo, Rx_hi);
2099 
2100     if (unordered_result == 1) {
2101       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2102     } else {
2103       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2104     }
2105   }
2106 
2107 #else
2108 
2109   if (is_float) {
2110     transition(ftos, itos);
2111     __ pop_f(S1_tmp);
2112     __ fcmps(S1_tmp, S0_tos);
2113   } else {
2114     transition(dtos, itos);
2115     __ pop_d(D1_tmp);
2116     __ fcmpd(D1_tmp, D0_tos);
2117   }
2118 
2119   __ fmstat();
2120 
2121   // comparison result | flag N | flag Z | flag C | flag V
2122   // "<"               |   1    |   0    |   0    |   0
2123   // "=="              |   0    |   1    |   1    |   0
2124   // ">"               |   0    |   0    |   1    |   0
2125   // unordered         |   0    |   0    |   1    |   1
2126 
2127   if (unordered_result < 0) {
2128     __ mov(R0_tos, 1);           // result ==  1 if greater
2129     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2130   } else {
2131     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2132     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2133   }
2134   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2135 #endif // __SOFTFP__
2136 #endif // AARCH64
2137 }
2138 
2139 
2140 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2141 
2142   const Register Rdisp = R0_tmp;
2143   const Register Rbumped_taken_count = R5_tmp;
2144 
2145   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2146 
2147   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2148                              InvocationCounter::counter_offset();
2149   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2150                               InvocationCounter::counter_offset();
2151   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2152 
2153   // Load up R0 with the branch displacement
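  // (a signed big-endian offset: 2 bytes for regular branches, 4 bytes for goto_w/jsr_w)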
2154   if (is_wide) {
2155     __ ldrsb(R0_tmp, at_bcp(1));
2156     __ ldrb(R1_tmp, at_bcp(2));
2157     __ ldrb(R2_tmp, at_bcp(3));
2158     __ ldrb(R3_tmp, at_bcp(4));
2159     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2160     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2161     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2162   } else {
2163     __ ldrsb(R0_tmp, at_bcp(1));
2164     __ ldrb(R1_tmp, at_bcp(2));
2165     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2166   }
2167 
2168   // Handle all the JSR stuff here, then exit.
2169   // It's much shorter and cleaner than intermingling with the
2170   // non-JSR normal-branch stuff occurring below.
2171   if (is_jsr) {
2172     // compute return address as bci in R1
2173     const Register Rret_addr = R1_tmp;
2174     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2175 
2176     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2177     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2178     __ sub(Rret_addr, Rret_addr, Rtemp);
2179 
2180     // Load the next target bytecode into R3_bytecode and advance Rbcp
2181 #ifdef AARCH64
2182     __ add(Rbcp, Rbcp, Rdisp);
2183     __ ldrb(R3_bytecode, Address(Rbcp));
2184 #else
2185     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2186 #endif // AARCH64
2187 
2188     // Push return address
2189     __ push_i(Rret_addr);
2190     // jsr returns vtos
2191     __ dispatch_only_noverify(vtos);
2192     return;
2193   }
2194 
2195   // Normal (non-jsr) branch handling
2196 
2197   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2198 #ifdef AARCH64
2199   __ add(Rbcp, Rbcp, Rdisp);
2200   __ ldrb(R3_bytecode, Address(Rbcp));
2201 #else
2202   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2203 #endif // AARCH64
2204 
2205   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2206   Label backedge_counter_overflow;
2207   Label profile_method;
2208   Label dispatch;
2209 
2210   if (UseLoopCounter) {
2211     // increment backedge counter for backward branches
2212     // Rdisp (R0): target offset
2213 
2214     const Register Rcnt = R2_tmp;
2215     const Register Rcounters = R1_tmp;
2216 
2217     // count only if backward branch
2218 #ifdef AARCH64
2219     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2220 #else
2221     __ tst(Rdisp, Rdisp);
2222     __ b(dispatch, pl);
2223 #endif // AARCH64
2224 
2225     if (TieredCompilation) {
2226       Label no_mdo;
2227       int increment = InvocationCounter::count_increment;
2228       if (ProfileInterpreter) {
2229         // Are we profiling?
2230         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2231         __ cbz(Rtemp, no_mdo);
2232         // Increment the MDO backedge counter
2233         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2234                                                   in_bytes(InvocationCounter::counter_offset()));
2235         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2236         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2237                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2238         __ b(dispatch);
2239       }
2240       __ bind(no_mdo);
2241       // Increment backedge counter in MethodCounters*
2242       // Note Rbumped_taken_count is a callee saved register for ARM32, but caller saved for ARM64
2243       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2244                              Rdisp, R3_bytecode,
2245                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2246       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2247       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2248                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2249     } else {
2250       // Increment backedge counter in MethodCounters*
2251       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2252                              Rdisp, R3_bytecode,
2253                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2254       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2255       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2256       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2257 
2258       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2259 #ifdef AARCH64
2260       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2261 #else
2262       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2263 #endif // AARCH64
2264       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2265 
2266       if (ProfileInterpreter) {
2267         // Test to see if we should create a method data oop
2268         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2269         __ ldr_s32(Rtemp, profile_limit);
2270         __ cmp_32(Rcnt, Rtemp);
2271         __ b(dispatch, lt);
2272 
2273         // if no method data exists, go to profile method
2274         __ test_method_data_pointer(R4_tmp, profile_method);
2275 
2276         if (UseOnStackReplacement) {
2277           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2278           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2279           __ ldr_s32(Rtemp, backward_branch_limit);
2280           __ cmp(Rbumped_taken_count, Rtemp);
2281           __ b(dispatch, lo);
2282 
2283           // When ProfileInterpreter is on, the backedge_count comes from the
2284           // MethodData*, whose value does not get reset on the call to
2285           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2286           // routine while the method is being compiled, add a second test to make
2287           // sure the overflow function is called only once every overflow_frequency.
2288           const int overflow_frequency = 1024;
2289 
2290 #ifdef AARCH64
2291           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2292 #else
2293           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2294           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2295           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2296 #endif // AARCH64
2297 
2298           __ b(backedge_counter_overflow, eq);
2299         }
2300       } else {
2301         if (UseOnStackReplacement) {
2302           // check for overflow against Rcnt, which is the sum of the counters
2303           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2304           __ ldr_s32(Rtemp, backward_branch_limit);
2305           __ cmp_32(Rcnt, Rtemp);
2306           __ b(backedge_counter_overflow, hs);
2307 
2308         }
2309       }
2310     }
2311     __ bind(dispatch);
2312   }
2313 
2314   if (!UseOnStackReplacement) {
2315     __ bind(backedge_counter_overflow);
2316   }
2317 
2318   // continue with the bytecode @ target
2319   __ dispatch_only(vtos);
2320 
2321   if (UseLoopCounter) {
2322     if (ProfileInterpreter) {
2323       // Out-of-line code to allocate method data oop.
2324       __ bind(profile_method);
2325 
2326       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2327       __ set_method_data_pointer_for_bcp();
2328       // reload next bytecode
2329       __ ldrb(R3_bytecode, Address(Rbcp));
2330       __ b(dispatch);
2331     }
2332 
2333     if (UseOnStackReplacement) {
2334       // invocation counter overflow
2335       __ bind(backedge_counter_overflow);
2336 
2337       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2338       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2339 
2340       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2341       const Register Rnmethod = R0;
2342 
2343       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2344 
2345       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2346 
2347       // nmethod may have been invalidated (VM may block upon call_VM return)
2348       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2349       __ cmp(R1_tmp, nmethod::in_use);
2350       __ b(dispatch, ne);
2351 
2352       // We have the address of an on stack replacement routine in Rnmethod,
2353       // We need to prepare to execute the OSR method. First we must
2354       // migrate the locals and monitors off of the stack.
2355 
2356       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2357 
2358       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2359 
2360       // R0 is OSR buffer
2361 
2362       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2363       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2364 
2365 #ifdef AARCH64
2366       __ ldp(FP, LR, Address(FP));
2367       __ mov(SP, Rtemp);
2368 #else
2369       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2370       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2371 #endif // AARCH64
2372 
2373       __ jump(R1_tmp);
2374     }
2375   }
2376 }
2377 
2378 
2379 void TemplateTable::if_0cmp(Condition cc) {
2380   transition(itos, vtos);
2381   // assume branch is more often taken than not (loops use backward branches)
2382   Label not_taken;
2383 #ifdef AARCH64
2384   if (cc == equal) {
2385     __ cbnz_w(R0_tos, not_taken);
2386   } else if (cc == not_equal) {
2387     __ cbz_w(R0_tos, not_taken);
2388   } else {
2389     __ cmp_32(R0_tos, 0);
2390     __ b(not_taken, convNegCond(cc));
2391   }
2392 #else
2393   __ cmp_32(R0_tos, 0);
2394   __ b(not_taken, convNegCond(cc));
2395 #endif // AARCH64
2396   branch(false, false);
2397   __ bind(not_taken);
2398   __ profile_not_taken_branch(R0_tmp);
2399 }
2400 
2401 
2402 void TemplateTable::if_icmp(Condition cc) {
2403   transition(itos, vtos);
2404   // assume branch is more often taken than not (loops use backward branches)
2405   Label not_taken;
2406   __ pop_i(R1_tmp);
2407   __ cmp_32(R1_tmp, R0_tos);
2408   __ b(not_taken, convNegCond(cc));
2409   branch(false, false);
2410   __ bind(not_taken);
2411   __ profile_not_taken_branch(R0_tmp);
2412 }
2413 
2414 
2415 void TemplateTable::if_nullcmp(Condition cc) {
2416   transition(atos, vtos);
2417   assert(cc == equal || cc == not_equal, "invalid condition");
2418 
2419   // assume branch is more often taken than not (loops use backward branches)
2420   Label not_taken;
2421   if (cc == equal) {
2422     __ cbnz(R0_tos, not_taken);
2423   } else {
2424     __ cbz(R0_tos, not_taken);
2425   }
2426   branch(false, false);
2427   __ bind(not_taken);
2428   __ profile_not_taken_branch(R0_tmp);
2429 }
2430 
2431 
2432 void TemplateTable::if_acmp(Condition cc) {
2433   transition(atos, vtos);
2434   // assume branch is more often taken than not (loops use backward branches)
2435   Label not_taken;
2436   __ pop_ptr(R1_tmp);
2437   __ cmp(R1_tmp, R0_tos);
2438   __ b(not_taken, convNegCond(cc));
2439   branch(false, false);
2440   __ bind(not_taken);
2441   __ profile_not_taken_branch(R0_tmp);
2442 }
2443 
2444 
2445 void TemplateTable::ret() {
2446   transition(vtos, vtos);
2447   const Register Rlocal_index = R1_tmp;
2448   const Register Rret_bci = Rtmp_save0; // R4/R19
2449 
2450   locals_index(Rlocal_index);
2451   Address local = load_iaddress(Rlocal_index, Rtemp);
2452   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2453   __ profile_ret(Rtmp_save1, Rret_bci);
2454   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2455   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2456   __ add(Rbcp, Rtemp, Rret_bci);
2457   __ dispatch_next(vtos);
2458 }
2459 
2460 
2461 void TemplateTable::wide_ret() {
2462   transition(vtos, vtos);
2463   const Register Rlocal_index = R1_tmp;
2464   const Register Rret_bci = Rtmp_save0; // R4/R19
2465 
2466   locals_index_wide(Rlocal_index);
2467   Address local = load_iaddress(Rlocal_index, Rtemp);
2468   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2469   __ profile_ret(Rtmp_save1, Rret_bci);
2470   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2471   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2472   __ add(Rbcp, Rtemp, Rret_bci);
2473   __ dispatch_next(vtos);
2474 }
2475 
2476 
2477 void TemplateTable::tableswitch() {
2478   transition(itos, vtos);
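  // tableswitch, roughly:
  //   if (index < low || index > high) offset = default_offset;
  //   else                             offset = table[index - low];
  //   bcp += offset;
  // All 32-bit values in the bytecode stream are stored big-endian.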
2479 
2480   const Register Rindex  = R0_tos;
2481 #ifndef AARCH64
2482   const Register Rtemp2  = R1_tmp;
2483 #endif // !AARCH64
2484   const Register Rabcp   = R2_tmp;  // aligned bcp
2485   const Register Rlow    = R3_tmp;
2486   const Register Rhigh   = R4_tmp;
2487   const Register Roffset = R5_tmp;
2488 
2489   // align bcp
2490   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2491   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2492 
2493   // load lo & hi
2494 #ifdef AARCH64
2495   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2496 #else
2497   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2498 #endif // AARCH64
2499   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2500   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2501 
2502   // compare index with high bound
2503   __ cmp_32(Rhigh, Rindex);
2504 
2505 #ifdef AARCH64
2506   Label default_case, do_dispatch;
2507   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2508   __ b(default_case, lt);
2509 
2510   __ sub_w(Rindex, Rindex, Rlow);
2511   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2512   if(ProfileInterpreter) {
2513     __ sxtw(Rindex, Rindex);
2514     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2515   }
2516   __ b(do_dispatch);
2517 
2518   __ bind(default_case);
2519   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2520   if(ProfileInterpreter) {
2521     __ profile_switch_default(R0_tmp);
2522   }
2523 
2524   __ bind(do_dispatch);
2525 #else
2526 
2527   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2528   __ subs(Rindex, Rindex, Rlow, ge);
2529 
2530   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2531   // ("ge" status accumulated from cmp and subs instructions) then load
2532   // offset from table, otherwise load offset for default case
2533 
2534   if(ProfileInterpreter) {
2535     Label default_case, continue_execution;
2536 
2537     __ b(default_case, lt);
2538     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2539     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2540     __ b(continue_execution);
2541 
2542     __ bind(default_case);
2543     __ profile_switch_default(R0_tmp);
2544     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2545 
2546     __ bind(continue_execution);
2547   } else {
2548     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2549     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2550   }
2551 #endif // AARCH64
2552 
2553   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2554 
2555   // load the next bytecode to R3_bytecode and advance Rbcp
2556 #ifdef AARCH64
2557   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2558   __ ldrb(R3_bytecode, Address(Rbcp));
2559 #else
2560   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2561 #endif // AARCH64
2562   __ dispatch_only(vtos);
2563 
2564 }
2565 
2566 
2567 void TemplateTable::lookupswitch() {
2568   transition(itos, itos);
2569   __ stop("lookupswitch bytecode should have been rewritten");
2570 }
2571 
2572 
2573 void TemplateTable::fast_linearswitch() {
2574   transition(itos, vtos);
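  // lookupswitch via a linear search of the (match, offset) pairs, roughly:
  //   offset = default_offset;
  //   for (int i = 0; i < npairs; i++) {
  //     if (pairs[i].match == key) { offset = pairs[i].offset; break; }
  //   }
  //   bcp += offset;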
2575   Label loop, found, default_case, continue_execution;
2576 
2577   const Register Rkey     = R0_tos;
2578   const Register Rabcp    = R2_tmp;  // aligned bcp
2579   const Register Rdefault = R3_tmp;
2580   const Register Rcount   = R4_tmp;
2581   const Register Roffset  = R5_tmp;
2582 
2583   // bswap Rkey, so we can avoid bswapping the table entries
2584   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2585 
2586   // align bcp
2587   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2588   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2589 
2590   // load default & counter
2591 #ifdef AARCH64
2592   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2593 #else
2594   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2595 #endif // AARCH64
2596   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2597 
2598 #ifdef AARCH64
2599   __ cbz_w(Rcount, default_case);
2600 #else
2601   __ cmp_32(Rcount, 0);
2602   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2603   __ b(default_case, eq);
2604 #endif // AARCH64
2605 
2606   // table search
2607   __ bind(loop);
2608 #ifdef AARCH64
2609   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2610 #endif // AARCH64
2611   __ cmp_32(Rtemp, Rkey);
2612   __ b(found, eq);
2613   __ subs(Rcount, Rcount, 1);
2614 #ifndef AARCH64
2615   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2616 #endif // !AARCH64
2617   __ b(loop, ne);
2618 
2619   // default case
2620   __ bind(default_case);
2621   __ profile_switch_default(R0_tmp);
2622   __ mov(Roffset, Rdefault);
2623   __ b(continue_execution);
2624 
2625   // entry found -> get offset
2626   __ bind(found);
2627   // Rabcp is already incremented and points to the next entry
2628   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2629   if (ProfileInterpreter) {
2630     // Calculate index of the selected case.
2631     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2632 
2633     // align bcp
2634     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2635     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2636 
2637     // load number of cases
2638     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2639     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2640 
2641     // Selected index = <number of cases> - <current loop count>
2642     __ sub(R1_tmp, R2_tmp, Rcount);
2643     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2644   }
2645 
2646   // continue execution
2647   __ bind(continue_execution);
2648   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2649 
2650   // load the next bytecode to R3_bytecode and advance Rbcp
2651 #ifdef AARCH64
2652   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2653   __ ldrb(R3_bytecode, Address(Rbcp));
2654 #else
2655   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2656 #endif // AARCH64
2657   __ dispatch_only(vtos);
2658 }
2659 
2660 
2661 void TemplateTable::fast_binaryswitch() {
2662   transition(itos, vtos);
2663   // Implementation using the following core algorithm:
2664   //
2665   // int binary_search(int key, LookupswitchPair* array, int n) {
2666   //   // Binary search according to "Methodik des Programmierens" by
2667   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2668   //   int i = 0;
2669   //   int j = n;
2670   //   while (i+1 < j) {
2671   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2672   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2673   //     // where a stands for the array and assuming that the (nonexistent)
2674   //     // element a[n] is infinitely big.
2675   //     int h = (i + j) >> 1;
2676   //     // i < h < j
2677   //     if (key < array[h].fast_match()) {
2678   //       j = h;
2679   //     } else {
2680   //       i = h;
2681   //     }
2682   //   }
2683   //   // R: a[i] <= key < a[i+1] or Q
2684   //   // (i.e., if key is within array, i is the correct index)
2685   //   return i;
2686   // }
2687 
2688   // register allocation
2689   const Register key    = R0_tos;                // already set (tosca)
2690   const Register array  = R1_tmp;
2691   const Register i      = R2_tmp;
2692   const Register j      = R3_tmp;
2693   const Register h      = R4_tmp;
2694   const Register val    = R5_tmp;
2695   const Register temp1  = Rtemp;
2696   const Register temp2  = LR_tmp;
2697   const Register offset = R3_tmp;
2698 
2699   // set 'array' = aligned bcp + 2 ints
2700   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2701   __ align_reg(array, temp1, BytesPerInt);
2702 
2703   // initialize i & j
2704   __ mov(i, 0);                                  // i = 0;
2705   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2706   // Convert j into native byte ordering
2707   __ byteswap_u32(j, temp1, temp2);
2708 
2709   // and start
2710   Label entry;
2711   __ b(entry);
2712 
2713   // binary search loop
2714   { Label loop;
2715     __ bind(loop);
2716     // int h = (i + j) >> 1;
2717     __ add(h, i, j);                             // h = i + j;
2718     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2719     // if (key < array[h].fast_match()) {
2720     //   j = h;
2721     // } else {
2722     //   i = h;
2723     // }
2724 #ifdef AARCH64
2725     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2726     __ ldr_s32(val, Address(temp1));
2727 #else
2728     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2729 #endif // AARCH64
2730     // Convert array[h].match to native byte-ordering before compare
2731     __ byteswap_u32(val, temp1, temp2);
2732     __ cmp_32(key, val);
2733     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2734     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2735     // while (i+1 < j)
2736     __ bind(entry);
2737     __ add(temp1, i, 1);                             // i+1
2738     __ cmp(temp1, j);                                // i+1 < j
2739     __ b(loop, lt);
2740   }
2741 
2742   // end of binary search, result index is i (must check again!)
2743   Label default_case;
2744   // Convert array[i].match to native byte-ordering before compare
2745 #ifdef AARCH64
2746   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2747   __ ldr_s32(val, Address(temp1));
2748 #else
2749   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2750 #endif // AARCH64
2751   __ byteswap_u32(val, temp1, temp2);
2752   __ cmp_32(key, val);
2753   __ b(default_case, ne);
2754 
2755   // entry found
2756   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2757   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2758   __ profile_switch_case(R0, i, R1, i);
2759   __ byteswap_u32(offset, temp1, temp2);
2760 #ifdef AARCH64
2761   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2762   __ ldrb(R3_bytecode, Address(Rbcp));
2763 #else
2764   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2765 #endif // AARCH64
2766   __ dispatch_only(vtos);
2767 
2768   // default case
2769   __ bind(default_case);
2770   __ profile_switch_default(R0);
2771   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2772   __ byteswap_u32(offset, temp1, temp2);
2773 #ifdef AARCH64
2774   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2775   __ ldrb(R3_bytecode, Address(Rbcp));
2776 #else
2777   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2778 #endif // AARCH64
2779   __ dispatch_only(vtos);
2780 }
2781 
2782 
2783 void TemplateTable::_return(TosState state) {
2784   transition(state, state);
2785   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2786 
2787   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2788     Label skip_register_finalizer;
2789     assert(state == vtos, "only valid state");
2790     __ ldr(R1, aaddress(0));
2791     __ load_klass(Rtemp, R1);
2792     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2793     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2794 
2795     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2796 
2797     __ bind(skip_register_finalizer);
2798   }
2799 
2800   // Narrow result if state is itos but result type is smaller.
2801   // Need to narrow in the return bytecode rather than in generate_return_entry
2802   // since compiled code callers expect the result to already be narrowed.
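  // (e.g. a method declared to return byte must deliver a sign-extended value,
  // and one declared to return boolean only 0 or 1)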
2803   if (state == itos) {
2804     __ narrow(R0_tos);
2805   }
2806   __ remove_activation(state, LR);
2807 
2808   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2809 
2810 #ifndef AARCH64
2811   // According to interpreter calling conventions, result is returned in R0/R1,
2812   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2813   // This conversion should be done after remove_activation, as it uses
2814   // push(state) & pop(state) to preserve return value.
2815   __ convert_tos_to_retval(state);
2816 #endif // !AARCH64
2817 
2818   __ ret();
2819 
2820   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2821   __ nop();
2822 }
2823 
2824 
2825 // ----------------------------------------------------------------------------
2826 // Volatile variables demand their effects be made known to all CPUs in
2827 // order.  Store buffers on most chips allow reads & writes to reorder; the
2828 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2829 // memory barrier (i.e., it's not sufficient that the interpreter does not
2830 // reorder volatile references, the hardware also must not reorder them).
2831 //
2832 // According to the new Java Memory Model (JMM):
2833 // (1) All volatiles are serialized wrt to each other.
2834 // ALSO reads & writes act as acquire & release, so:
2835 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2836 // the read float up to before the read.  It's OK for non-volatile memory refs
2837 // that happen before the volatile read to float down below it.
2838 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2839 // that happen BEFORE the write float down to after the write.  It's OK for
2840 // non-volatile memory refs that happen after the volatile write to float up
2841 // before it.
2842 //
2843 // We only put in barriers around volatile refs (they are expensive), not
2844 // _between_ memory refs (that would require us to track the flavor of the
2845 // previous memory refs).  Requirements (2) and (3) require some barriers
2846 // before volatile stores and after volatile loads.  These nearly cover
2847 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2848 // case is placed after volatile-stores although it could just as well go
2849 // before volatile-loads.
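//
// A sketch of how volatile_barrier() is placed by the field access templates below:
//   volatile load :  load value;  volatile_barrier(LoadLoad | LoadStore)
//   volatile store:  volatile_barrier(StoreStore | LoadStore);  store value;  volatile_barrier(StoreLoad)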
2850 // TODO-AARCH64: consider removing extra unused parameters
2851 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2852                                      Register tmp,
2853                                      bool preserve_flags,
2854                                      Register load_tgt) {
2855 #ifdef AARCH64
2856   __ membar(order_constraint);
2857 #else
2858   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2859 #endif
2860 }
2861 
2862 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2863 void TemplateTable::resolve_cache_and_index(int byte_no,
2864                                             Register Rcache,
2865                                             Register Rindex,
2866                                             size_t index_size) {
2867   assert_different_registers(Rcache, Rindex, Rtemp);
2868 
2869   Label resolved;
2870   Bytecodes::Code code = bytecode();
2871   switch (code) {
2872   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2873   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2874   }
2875 
2876   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2877   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2878   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2879   __ b(resolved, eq);
2880 
2881   // resolve first time through
2882   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2883   __ mov(R1, code);
2884   __ call_VM(noreg, entry, R1);
2885   // Update registers with resolved info
2886   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2887   __ bind(resolved);
2888 }
2889 
2890 
2891 // The Rcache and Rindex registers must be set before call
2892 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2893                                               Register Rindex,
2894                                               Register Roffset,
2895                                               Register Rflags,
2896                                               Register Robj,
2897                                               bool is_static = false) {
2898 
2899   assert_different_registers(Rcache, Rindex, Rtemp);
2900   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2901 
2902   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2903 
2904   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2905 
2906   // Field offset
2907   __ ldr(Roffset, Address(Rtemp,
2908            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2909 
2910   // Flags
2911   __ ldr_u32(Rflags, Address(Rtemp,
2912            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2913 
2914   if (is_static) {
2915     __ ldr(Robj, Address(Rtemp,
2916              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2917     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2918     __ ldr(Robj, Address(Robj, mirror_offset));
2919     __ resolve_oop_handle(Robj);
2920   }
2921 }
2922 
2923 
2924 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2925 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2926                                                Register method,
2927                                                Register itable_index,
2928                                                Register flags,
2929                                                bool is_invokevirtual,
2930                                                bool is_invokevfinal/*unused*/,
2931                                                bool is_invokedynamic) {
2932   // setup registers
2933   const Register cache = R2_tmp;
2934   const Register index = R3_tmp;
2935   const Register temp_reg = Rtemp;
2936   assert_different_registers(cache, index, temp_reg);
2937   assert_different_registers(method, itable_index, temp_reg);
2938 
2939   // determine constant pool cache field offsets
2940   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2941   const int method_offset = in_bytes(
2942     ConstantPoolCache::base_offset() +
2943       ((byte_no == f2_byte)
2944        ? ConstantPoolCacheEntry::f2_offset()
2945        : ConstantPoolCacheEntry::f1_offset()
2946       )
2947     );
2948   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2949                                     ConstantPoolCacheEntry::flags_offset());
2950   // access constant pool cache fields
2951   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2952                                     ConstantPoolCacheEntry::f2_offset());
2953 
2954   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2955   resolve_cache_and_index(byte_no, cache, index, index_size);
2956   __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2957   __ ldr(method, Address(temp_reg, method_offset));
2958 
2959   if (itable_index != noreg) {
2960     __ ldr(itable_index, Address(temp_reg, index_offset));
2961   }
2962   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2963 }
2964 
2965 
2966 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
2967 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
2968 // except cache and index registers which are preserved.
2969 void TemplateTable::jvmti_post_field_access(Register Rcache,
2970                                             Register Rindex,
2971                                             bool is_static,
2972                                             bool has_tos) {
2973   assert_different_registers(Rcache, Rindex, Rtemp);
2974 
2975   if (__ can_post_field_access()) {
2976     // Check to see if a field access watch has been set before we take
2977     // the time to call into the VM.
2978 
2979     Label Lcontinue;
2980 
2981     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2982     __ cbz(Rtemp, Lcontinue);
2983 
2984     // cache entry pointer
2985     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2986     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2987     if (is_static) {
2988       __ mov(R1, 0);        // NULL object reference
2989     } else {
2990       __ pop(atos);         // Get the object
2991       __ mov(R1, R0_tos);
2992       __ verify_oop(R1);
2993       __ push(atos);        // Restore stack state
2994     }
2995     // R1: object pointer or NULL
2996     // R2: cache entry pointer
2997     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2998                R1, R2);
2999     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3000 
3001     __ bind(Lcontinue);
3002   }
3003 }
3004 
3005 
3006 void TemplateTable::pop_and_check_object(Register r) {
3007   __ pop_ptr(r);
3008   __ null_check(r, Rtemp);  // for field access must check obj.
3009   __ verify_oop(r);
3010 }
3011 
3012 
3013 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3014   transition(vtos, vtos);
3015 
3016   const Register Roffset  = R2_tmp;
3017   const Register Robj     = R3_tmp;
3018   const Register Rcache   = R4_tmp;
3019   const Register Rflagsav = Rtmp_save0;  // R4/R19
3020   const Register Rindex   = R5_tmp;
3021   const Register Rflags   = R5_tmp;
3022 
3023   const bool gen_volatile_check = os::is_MP();
3024 
3025   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3026   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3027   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3028 
3029   if (gen_volatile_check) {
3030     __ mov(Rflagsav, Rflags);
3031   }
3032 
3033   if (!is_static) pop_and_check_object(Robj);
3034 
3035   Label Done, Lint, Ltable, shouldNotReachHere;
3036   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3037 
3038   // compute type
3039   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3040   // Make sure we don't need to mask flags after the above shift
3041   ConstantPoolCacheEntry::verify_tos_state_shift();
3042 
3043   // There are actually two versions of the getfield/getstatic implementation:
3044   //
3045   // 32-bit ARM:
3046   // 1) Table switch using add(PC,...) instruction (fast_version)
3047   // 2) Table switch using ldr(PC,...) instruction
3048   //
3049   // AArch64:
3050   // 1) Table switch using adr/add/br instructions (fast_version)
3051   // 2) Table switch using adr/ldr/br instructions
3052   //
3053   // The first version requires a fixed-size code block for each case and
3054   // cannot be used when RewriteBytecodes or VerifyOops is
3055   // enabled.
3056 
3057   // Size of fixed size code block for fast_version
3058   const int log_max_block_size = 2;
3059   const int max_block_size = 1 << log_max_block_size;
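  // With the fast version each case below is padded by FixedSizeCodeBlock to
  // max_block_size instructions, so the dispatch can reach the case for tos
  // state N directly at Lbtos + (N << (log_max_block_size + Assembler::LogInstructionSize)).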
3060 
3061   // Decide if fast version is enabled
3062   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3063 
3064   // On 32-bit ARM the atos and itos cases can be merged only in the fast version,
3065   // because atos requires additional processing in the slow version.
3066   // On AArch64 atos and itos cannot be merged.
3067   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3068 
3069   assert(number_of_states == 10, "number of tos states should be equal to 10");
3070 
3071   __ cmp(Rflags, itos);
3072 #ifdef AARCH64
3073   __ b(Lint, eq);
3074 
3075   if(fast_version) {
3076     __ adr(Rtemp, Lbtos);
3077     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3078     __ br(Rtemp);
3079   } else {
3080     __ adr(Rtemp, Ltable);
3081     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3082     __ br(Rtemp);
3083   }
3084 #else
3085   if(atos_merged_with_itos) {
3086     __ cmp(Rflags, atos, ne);
3087   }
3088 
3089   // table switch by type
3090   if(fast_version) {
3091     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3092   } else {
3093     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3094   }
3095 
3096   // jump to itos/atos case
3097   __ b(Lint);
3098 #endif // AARCH64
3099 
3100   // table with addresses for slow version
3101   if (fast_version) {
3102     // nothing to do
3103   } else  {
3104     AARCH64_ONLY(__ align(wordSize));
3105     __ bind(Ltable);
3106     __ emit_address(Lbtos);
3107     __ emit_address(Lztos);
3108     __ emit_address(Lctos);
3109     __ emit_address(Lstos);
3110     __ emit_address(Litos);
3111     __ emit_address(Lltos);
3112     __ emit_address(Lftos);
3113     __ emit_address(Ldtos);
3114     __ emit_address(Latos);
3115   }
3116 
3117 #ifdef ASSERT
3118   int seq = 0;
3119 #endif
3120   // btos
3121   {
3122     assert(btos == seq++, "btos has unexpected value");
3123     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3124     __ bind(Lbtos);
3125     __ ldrsb(R0_tos, Address(Robj, Roffset));
3126     __ push(btos);
3127     // Rewrite bytecode to be faster
3128     if (!is_static && rc == may_rewrite) {
3129       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3130     }
3131     __ b(Done);
3132   }
3133 
3134   // ztos (same as btos for getfield)
3135   {
3136     assert(ztos == seq++, "ztos has unexpected value");
3137     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3138     __ bind(Lztos);
3139     __ ldrsb(R0_tos, Address(Robj, Roffset));
3140     __ push(ztos);
3141     // Rewrite bytecode to be faster (use btos fast getfield)
3142     if (!is_static && rc == may_rewrite) {
3143       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3144     }
3145     __ b(Done);
3146   }
3147 
3148   // ctos
3149   {
3150     assert(ctos == seq++, "ctos has unexpected value");
3151     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3152     __ bind(Lctos);
3153     __ ldrh(R0_tos, Address(Robj, Roffset));
3154     __ push(ctos);
3155     if (!is_static && rc == may_rewrite) {
3156       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3157     }
3158     __ b(Done);
3159   }
3160 
3161   // stos
3162   {
3163     assert(stos == seq++, "stos has unexpected value");
3164     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3165     __ bind(Lstos);
3166     __ ldrsh(R0_tos, Address(Robj, Roffset));
3167     __ push(stos);
3168     if (!is_static && rc == may_rewrite) {
3169       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3170     }
3171     __ b(Done);
3172   }
3173 
3174   // itos
3175   {
3176     assert(itos == seq++, "itos has unexpected value");
3177     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3178     __ bind(Litos);
3179     __ b(shouldNotReachHere);
3180   }
3181 
3182   // ltos
3183   {
3184     assert(ltos == seq++, "ltos has unexpected value");
3185     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3186     __ bind(Lltos);
3187 #ifdef AARCH64
3188     __ ldr(R0_tos, Address(Robj, Roffset));
3189 #else
3190     __ add(Roffset, Robj, Roffset);
3191     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3192 #endif // AARCH64
3193     __ push(ltos);
3194     if (!is_static && rc == may_rewrite) {
3195       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3196     }
3197     __ b(Done);
3198   }
3199 
3200   // ftos
3201   {
3202     assert(ftos == seq++, "ftos has unexpected value");
3203     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3204     __ bind(Lftos);
3205     // floats and ints are placed on stack in the same way, so
3206     // we can use push(itos) to transfer value without using VFP
3207     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3208     __ push(itos);
3209     if (!is_static && rc == may_rewrite) {
3210       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3211     }
3212     __ b(Done);
3213   }
3214 
3215   // dtos
3216   {
3217     assert(dtos == seq++, "dtos has unexpected value");
3218     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3219     __ bind(Ldtos);
3220     // doubles and longs are placed on stack in the same way, so
3221     // we can use push(ltos) to transfer value without using VFP
3222 #ifdef AARCH64
3223     __ ldr(R0_tos, Address(Robj, Roffset));
3224 #else
3225     __ add(Rtemp, Robj, Roffset);
3226     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3227 #endif // AARCH64
3228     __ push(ltos);
3229     if (!is_static && rc == may_rewrite) {
3230       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3231     }
3232     __ b(Done);
3233   }
3234 
3235   // atos
3236   {
3237     assert(atos == seq++, "atos has unexpected value");
3238 
3239     // atos case for AArch64 and slow version on 32-bit ARM
3240     if(!atos_merged_with_itos) {
3241       __ bind(Latos);
3242       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3243       __ push(atos);
3244       // Rewrite bytecode to be faster
3245       if (!is_static && rc == may_rewrite) {
3246         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3247       }
3248       __ b(Done);
3249     }
3250   }
3251 
3252   assert(vtos == seq++, "vtos has unexpected value");
3253 
3254   __ bind(shouldNotReachHere);
3255   __ should_not_reach_here();
3256 
3257   // itos and atos cases are frequent so it makes sense to move them out of table switch
3258   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3259 
3260   __ bind(Lint);
3261   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3262   __ push(itos);
3263   // Rewrite bytecode to be faster
3264   if (!is_static && rc == may_rewrite) {
3265     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3266   }
3267 
3268   __ bind(Done);
3269 
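       // Acquire semantics for a volatile load: the LoadLoad|LoadStore barriers
       // emitted below keep later loads and stores from floating above the load.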
3270   if (gen_volatile_check) {
3271     // Check for volatile field
3272     Label notVolatile;
3273     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3274 
3275     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3276 
3277     __ bind(notVolatile);
3278   }
3279 
3280 }
3281 
3282 void TemplateTable::getfield(int byte_no) {
3283   getfield_or_static(byte_no, false);
3284 }
3285 
3286 void TemplateTable::nofast_getfield(int byte_no) {
3287   getfield_or_static(byte_no, false, may_not_rewrite);
3288 }
3289 
3290 void TemplateTable::getstatic(int byte_no) {
3291   getfield_or_static(byte_no, true);
3292 }
3293 
3294 
3295 // The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
3296 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp and LR,
3297 // except the cache and index registers, which are preserved.
3298 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3299   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3300   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3301 
3302   if (__ can_post_field_modification()) {
3303     // Check to see if a field modification watch has been set before we take
3304     // the time to call into the VM.
3305     Label Lcontinue;
3306 
3307     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3308     __ cbz(Rtemp, Lcontinue);
3309 
3310     if (is_static) {
3311       // Life is simple.  Null out the object pointer.
3312       __ mov(R1, 0);
3313     } else {
3314       // Life is harder. The stack holds the value on top, followed by the object.
3315       // We don't know the size of the value, though; it could be one or two words
3316       // depending on its type. As a result, we must find the type to determine where
3317       // the object is.
3318 
3319       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3320       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3321 
3322       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3323       // Make sure we don't need to mask Rtemp after the above shift
3324       ConstantPoolCacheEntry::verify_tos_state_shift();
3325 
3326       __ cmp(Rtemp, ltos);
3327       __ cond_cmp(Rtemp, dtos, ne);
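           // eq <=> the value on the stack is a two-slot ltos/dtos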
3328 #ifdef AARCH64
3329       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3330       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3331       __ mov(R1, Rtemp, eq);
3332       __ ldr(R1, Address(Rstack_top, R1));
3333 #else
3334       // two word value (ltos/dtos)
3335       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3336 
3337       // one word value (not ltos, dtos)
3338       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3339 #endif // AARCH64
3340     }
3341 
3342     // cache entry pointer
3343     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3344     __ add(R2, R2, in_bytes(cp_base_offset));
3345 
3346     // object (tos)
3347     __ mov(R3, Rstack_top);
3348 
3349     // R1: object pointer set up above (NULL if static)
3350     // R2: cache entry pointer
3351     // R3: value object on the stack
3352     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3353                R1, R2, R3);
3354     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3355 
3356     __ bind(Lcontinue);
3357   }
3358 }
3359 
3360 
3361 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3362   transition(vtos, vtos);
3363 
3364   const Register Roffset  = R2_tmp;
3365   const Register Robj     = R3_tmp;
3366   const Register Rcache   = R4_tmp;
3367   const Register Rflagsav = Rtmp_save0;  // R4/R19
3368   const Register Rindex   = R5_tmp;
3369   const Register Rflags   = R5_tmp;
3370 
3371   const bool gen_volatile_check = os::is_MP();
3372 
3373   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3374   jvmti_post_field_mod(Rcache, Rindex, is_static);
3375   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3376 
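       // Release semantics for a volatile store: a StoreStore|LoadStore barrier is
       // emitted before the store below, and a StoreLoad barrier after it (see the
       // code following the Done label).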
3377   if (gen_volatile_check) {
3378     // Check for volatile field
3379     Label notVolatile;
3380     __ mov(Rflagsav, Rflags);
3381     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3382 
3383     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3384 
3385     __ bind(notVolatile);
3386   }
3387 
3388   Label Done, Lint, shouldNotReachHere;
3389   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3390 
3391   // compute type
3392   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3393   // Make sure we don't need to mask flags after the above shift
3394   ConstantPoolCacheEntry::verify_tos_state_shift();
3395 
3396   // There are actually two implementations of putfield/putstatic:
3397   //
3398   // 32-bit ARM:
3399   // 1) Table switch using add(PC,...) instruction (fast_version)
3400   // 2) Table switch using ldr(PC,...) instruction
3401   //
3402   // AArch64:
3403   // 1) Table switch using adr/add/br instructions (fast_version)
3404   // 2) Table switch using adr/ldr/br instructions
3405   //
3406   // The first version requires a fixed-size code block for each case and
3407   // cannot be used in RewriteBytecodes and VerifyOops
3408   // modes.
3409 
3410   // Size of fixed size code block for fast_version (in instructions)
3411   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3412   const int max_block_size = 1 << log_max_block_size;
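       // In the fast version every type handler occupies exactly max_block_size
       // instructions, so the dispatch target is computed directly as
       //   Lbtos + (tos_state << (log_max_block_size + LogInstructionSize))
       // and no address table is needed.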
3413 
3414   // Decide if fast version is enabled
3415   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3416 
3417   assert(number_of_states == 10, "number of tos states should be equal to 10");
3418 
3419   // The itos case is frequent and is moved outside the table switch
3420   __ cmp(Rflags, itos);
3421 
3422 #ifdef AARCH64
3423   __ b(Lint, eq);
3424 
3425   if (fast_version) {
3426     __ adr(Rtemp, Lbtos);
3427     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3428     __ br(Rtemp);
3429   } else {
3430     __ adr(Rtemp, Ltable);
3431     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3432     __ br(Rtemp);
3433   }
3434 #else
3435   // table switch by type
3436   if (fast_version) {
3437     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3438   } else {
3439     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3440   }
3441 
3442   // jump to itos case
3443   __ b(Lint);
3444 #endif // AARCH64
3445 
3446   // table with addresses for slow version
3447   if (fast_version) {
3448     // nothing to do
3449   } else {
3450     AARCH64_ONLY(__ align(wordSize));
3451     __ bind(Ltable);
3452     __ emit_address(Lbtos);
3453     __ emit_address(Lztos);
3454     __ emit_address(Lctos);
3455     __ emit_address(Lstos);
3456     __ emit_address(Litos);
3457     __ emit_address(Lltos);
3458     __ emit_address(Lftos);
3459     __ emit_address(Ldtos);
3460     __ emit_address(Latos);
3461   }
3462 
3463 #ifdef ASSERT
3464   int seq = 0;
3465 #endif
3466   // btos
3467   {
3468     assert(btos == seq++, "btos has unexpected value");
3469     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3470     __ bind(Lbtos);
3471     __ pop(btos);
3472     if (!is_static) pop_and_check_object(Robj);
3473     __ strb(R0_tos, Address(Robj, Roffset));
3474     if (!is_static && rc == may_rewrite) {
3475       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3476     }
3477     __ b(Done);
3478   }
3479 
3480   // ztos
3481   {
3482     assert(ztos == seq++, "ztos has unexpected value");
3483     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3484     __ bind(Lztos);
3485     __ pop(ztos);
3486     if (!is_static) pop_and_check_object(Robj);
3487     __ and_32(R0_tos, R0_tos, 1);
3488     __ strb(R0_tos, Address(Robj, Roffset));
3489     if (!is_static && rc == may_rewrite) {
3490       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3491     }
3492     __ b(Done);
3493   }
3494 
3495   // ctos
3496   {
3497     assert(ctos == seq++, "ctos has unexpected value");
3498     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3499     __ bind(Lctos);
3500     __ pop(ctos);
3501     if (!is_static) pop_and_check_object(Robj);
3502     __ strh(R0_tos, Address(Robj, Roffset));
3503     if (!is_static && rc == may_rewrite) {
3504       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3505     }
3506     __ b(Done);
3507   }
3508 
3509   // stos
3510   {
3511     assert(stos == seq++, "stos has unexpected value");
3512     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3513     __ bind(Lstos);
3514     __ pop(stos);
3515     if (!is_static) pop_and_check_object(Robj);
3516     __ strh(R0_tos, Address(Robj, Roffset));
3517     if (!is_static && rc == may_rewrite) {
3518       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3519     }
3520     __ b(Done);
3521   }
3522 
3523   // itos
3524   {
3525     assert(itos == seq++, "itos has unexpected value");
3526     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3527     __ bind(Litos);
3528     __ b(shouldNotReachHere);
3529   }
3530 
3531   // ltos
3532   {
3533     assert(ltos == seq++, "ltos has unexpected value");
3534     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3535     __ bind(Lltos);
3536     __ pop(ltos);
3537     if (!is_static) pop_and_check_object(Robj);
3538 #ifdef AARCH64
3539     __ str(R0_tos, Address(Robj, Roffset));
3540 #else
3541     __ add(Roffset, Robj, Roffset);
3542     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3543 #endif // AARCH64
3544     if (!is_static && rc == may_rewrite) {
3545       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3546     }
3547     __ b(Done);
3548   }
3549 
3550   // ftos
3551   {
3552     assert(ftos == seq++, "ftos has unexpected value");
3553     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3554     __ bind(Lftos);
3555     // floats and ints are placed on the stack in the same way, so
3556     // we can use pop(itos) to transfer the value without using VFP
3557     __ pop(itos);
3558     if (!is_static) pop_and_check_object(Robj);
3559     __ str_32(R0_tos, Address(Robj, Roffset));
3560     if (!is_static && rc == may_rewrite) {
3561       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3562     }
3563     __ b(Done);
3564   }
3565 
3566   // dtos
3567   {
3568     assert(dtos == seq++, "dtos has unexpected value");
3569     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3570     __ bind(Ldtos);
3571     // doubles and longs are placed on the stack in the same way, so
3572     // we can use pop(ltos) to transfer the value without using VFP
3573     __ pop(ltos);
3574     if (!is_static) pop_and_check_object(Robj);
3575 #ifdef AARCH64
3576     __ str(R0_tos, Address(Robj, Roffset));
3577 #else
3578     __ add(Rtemp, Robj, Roffset);
3579     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3580 #endif // AARCH64
3581     if (!is_static && rc == may_rewrite) {
3582       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3583     }
3584     __ b(Done);
3585   }
3586 
3587   // atos
3588   {
3589     assert(atos == seq++, "atos has unexpected value");
3590     __ bind(Latos);
3591     __ pop(atos);
3592     if (!is_static) pop_and_check_object(Robj);
3593     // Store into the field
3594     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3595     if (!is_static && rc == may_rewrite) {
3596       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3597     }
3598     __ b(Done);
3599   }
3600 
3601   __ bind(shouldNotReachHere);
3602   __ should_not_reach_here();
3603 
3604   // The itos case is frequent and is moved outside the table switch
3605   __ bind(Lint);
3606   __ pop(itos);
3607   if (!is_static) pop_and_check_object(Robj);
3608   __ str_32(R0_tos, Address(Robj, Roffset));
3609   if (!is_static && rc == may_rewrite) {
3610     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3611   }
3612 
3613   __ bind(Done);
3614 
3615   if (gen_volatile_check) {
3616     Label notVolatile;
3617     if (is_static) {
3618       // Just check for volatile. Memory barrier for static final field
3619       // is handled by class initialization.
3620       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3621       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3622       __ bind(notVolatile);
3623     } else {
3624       // Check for volatile field and final field
3625       Label skipMembar;
3626 
3627       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3628                        1 << ConstantPoolCacheEntry::is_final_shift);
3629       __ b(skipMembar, eq);
3630 
3631       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3632 
3633       // StoreLoad barrier after volatile field write
3634       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3635       __ b(skipMembar);
3636 
3637       // StoreStore barrier after final field write
3638       __ bind(notVolatile);
3639       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3640 
3641       __ bind(skipMembar);
3642     }
3643   }
3644 
3645 }
3646 
3647 void TemplateTable::putfield(int byte_no) {
3648   putfield_or_static(byte_no, false);
3649 }
3650 
3651 void TemplateTable::nofast_putfield(int byte_no) {
3652   putfield_or_static(byte_no, false, may_not_rewrite);
3653 }
3654 
3655 void TemplateTable::putstatic(int byte_no) {
3656   putfield_or_static(byte_no, true);
3657 }
3658 
3659 
3660 void TemplateTable::jvmti_post_fast_field_mod() {
3661   // This version of jvmti_post_fast_field_mod() is not used on ARM
3662   Unimplemented();
3663 }
3664 
3665 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3666 // but preserves tosca with the given state.
3667 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3668   if (__ can_post_field_modification()) {
3669     // Check to see if a field modification watch has been set before we take
3670     // the time to call into the VM.
3671     Label done;
3672 
3673     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3674     __ cbz(R2, done);
3675 
3676     __ pop_ptr(R3);               // copy the object pointer from tos
3677     __ verify_oop(R3);
3678     __ push_ptr(R3);              // put the object pointer back on tos
3679 
3680     __ push(state);               // save value on the stack
3681 
3682     // access constant pool cache entry
3683     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3684 
3685     __ mov(R1, R3);
3686     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3687     __ mov(R3, Rstack_top); // put tos addr into R3
3688 
3689     // R1: object pointer copied above
3690     // R2: cache entry pointer
3691     // R3: jvalue object on the stack
3692     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3693 
3694     __ pop(state);                // restore value
3695 
3696     __ bind(done);
3697   }
3698 }
3699 
3700 
3701 void TemplateTable::fast_storefield(TosState state) {
3702   transition(state, vtos);
3703 
3704   ByteSize base = ConstantPoolCache::base_offset();
3705 
3706   jvmti_post_fast_field_mod(state);
3707 
3708   const Register Rcache  = R2_tmp;
3709   const Register Rindex  = R3_tmp;
3710   const Register Roffset = R3_tmp;
3711   const Register Rflags  = Rtmp_save0; // R4/R19
3712   const Register Robj    = R5_tmp;
3713 
3714   const bool gen_volatile_check = os::is_MP();
3715 
3716   // access constant pool cache
3717   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3718 
3719   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3720 
3721   if (gen_volatile_check) {
3722     // load flags to test volatile
3723     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3724   }
3725 
3726   // replace index with field offset from cache entry
3727   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3728 
3729   if (gen_volatile_check) {
3730     // Check for volatile store
3731     Label notVolatile;
3732     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3733 
3734     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3735     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3736 
3737     __ bind(notVolatile);
3738   }
3739 
3740   // Get object from stack
3741   pop_and_check_object(Robj);
3742 
3743   // access field
3744   switch (bytecode()) {
3745     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3746                                      // fall through
3747     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3748     case Bytecodes::_fast_sputfield: // fall through
3749     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3750     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3751 #ifdef AARCH64
3752     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3753     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3754     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3755 #else
3756     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3757                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3758 
3759 #ifdef __SOFTFP__
3760     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3761     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3762                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3763 #else
3764     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3765                                      __ fsts(S0_tos, Address(Robj));          break;
3766     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3767                                      __ fstd(D0_tos, Address(Robj));          break;
3768 #endif // __SOFTFP__
3769 #endif // AARCH64
3770 
3771     case Bytecodes::_fast_aputfield:
3772       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3773       break;
3774 
3775     default:
3776       ShouldNotReachHere();
3777   }
3778 
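       // Same barrier scheme as putfield_or_static: StoreLoad after a volatile
       // store, StoreStore after a final field store so the field values are
       // ordered before the object reference is published.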
3779   if (gen_volatile_check) {
3780     Label notVolatile;
3781     Label skipMembar;
3782     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3783                    1 << ConstantPoolCacheEntry::is_final_shift);
3784     __ b(skipMembar, eq);
3785 
3786     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3787 
3788     // StoreLoad barrier after volatile field write
3789     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3790     __ b(skipMembar);
3791 
3792     // StoreStore barrier after final field write
3793     __ bind(notVolatile);
3794     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3795 
3796     __ bind(skipMembar);
3797   }
3798 }
3799 
3800 
3801 void TemplateTable::fast_accessfield(TosState state) {
3802   transition(atos, state);
3803 
3804   // do the JVMTI work here to avoid disturbing the register state below
3805   if (__ can_post_field_access()) {
3806     // Check to see if a field access watch has been set before we take
3807     // the time to call into the VM.
3808     Label done;
3809     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3810     __ cbz(R2, done);
3811     // access constant pool cache entry
3812     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3813     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3814     __ verify_oop(R0_tos);
3815     __ mov(R1, R0_tos);
3816     // R1: object pointer copied above
3817     // R2: cache entry pointer
3818     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3819     __ pop_ptr(R0_tos);   // restore object pointer
3820 
3821     __ bind(done);
3822   }
3823 
3824   const Register Robj    = R0_tos;
3825   const Register Rcache  = R2_tmp;
3826   const Register Rflags  = R2_tmp;
3827   const Register Rindex  = R3_tmp;
3828   const Register Roffset = R3_tmp;
3829 
3830   const bool gen_volatile_check = os::is_MP();
3831 
3832   // access constant pool cache
3833   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3834   // replace index with field offset from cache entry
3835   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3836   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3837 
3838   if (gen_volatile_check) {
3839     // load flags to test volatile
3840     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3841   }
3842 
3843   __ verify_oop(Robj);
3844   __ null_check(Robj, Rtemp);
3845 
3846   // access field
3847   switch (bytecode()) {
3848     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3849     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3850     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3851     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3852 #ifdef AARCH64
3853     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3854     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3855     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3856 #else
3857     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3858                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3859 #ifdef __SOFTFP__
3860     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3861     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3862                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3863 #else
3864     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3865     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3866 #endif // __SOFTFP__
3867 #endif // AARCH64
3868     case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3869     default:
3870       ShouldNotReachHere();
3871   }
3872 
3873   if (gen_volatile_check) {
3874     // Check for volatile load
3875     Label notVolatile;
3876     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3877 
3878     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3879     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3880 
3881     __ bind(notVolatile);
3882   }
3883 }
3884 
3885 
3886 void TemplateTable::fast_xaccess(TosState state) {
3887   transition(vtos, state);
3888 
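       // fast_xaccess implements the _fast_iaccess_0/_fast_aaccess_0/_fast_faccess_0
       // bytecodes: an aload_0 immediately followed by a fast getfield on the
       // receiver, which is why the cp cache index is taken at bcp offset 2 below.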
3889   const Register Robj = R1_tmp;
3890   const Register Rcache = R2_tmp;
3891   const Register Rindex = R3_tmp;
3892   const Register Roffset = R3_tmp;
3893   const Register Rflags = R4_tmp;
3894   Label done;
3895 
3896   // get receiver
3897   __ ldr(Robj, aaddress(0));
3898 
3899   // access constant pool cache
3900   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3901   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3902   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3903 
3904   const bool gen_volatile_check = os::is_MP();
3905 
3906   if (gen_volatile_check) {
3907     // load flags to test volatile
3908     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3909   }
3910 
3911   // make sure the exception is reported in the correct bcp range (getfield is the next instruction)
3912   __ add(Rbcp, Rbcp, 1);
3913   __ null_check(Robj, Rtemp);
3914   __ sub(Rbcp, Rbcp, 1);
3915 
3916 #ifdef AARCH64
3917   if (gen_volatile_check) {
3918     Label notVolatile;
3919     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3920 
3921     __ add(Rtemp, Robj, Roffset);
3922 
3923     if (state == itos) {
3924       __ ldar_w(R0_tos, Rtemp);
3925     } else if (state == atos) {
3926       if (UseCompressedOops) {
3927         __ ldar_w(R0_tos, Rtemp);
3928         __ decode_heap_oop(R0_tos);
3929       } else {
3930         __ ldar(R0_tos, Rtemp);
3931       }
3932       __ verify_oop(R0_tos);
3933     } else if (state == ftos) {
3934       __ ldar_w(R0_tos, Rtemp);
3935       __ fmov_sw(S0_tos, R0_tos);
3936     } else {
3937       ShouldNotReachHere();
3938     }
3939     __ b(done);
3940 
3941     __ bind(notVolatile);
3942   }
3943 #endif // AARCH64
3944 
3945   if (state == itos) {
3946     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3947   } else if (state == atos) {
3948     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3949     __ verify_oop(R0_tos);
3950   } else if (state == ftos) {
3951 #ifdef AARCH64
3952     __ ldr_s(S0_tos, Address(Robj, Roffset));
3953 #else
3954 #ifdef __SOFTFP__
3955     __ ldr(R0_tos, Address(Robj, Roffset));
3956 #else
3957     __ add(Roffset, Robj, Roffset);
3958     __ flds(S0_tos, Address(Roffset));
3959 #endif // __SOFTFP__
3960 #endif // AARCH64
3961   } else {
3962     ShouldNotReachHere();
3963   }
3964 
3965 #ifndef AARCH64
3966   if (gen_volatile_check) {
3967     // Check for volatile load
3968     Label notVolatile;
3969     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3970 
3971     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3972 
3973     __ bind(notVolatile);
3974   }
3975 #endif // !AARCH64
3976 
3977   __ bind(done);
3978 }
3979 
3980 
3981 
3982 //----------------------------------------------------------------------------------------------------
3983 // Calls
3984 
3985 void TemplateTable::count_calls(Register method, Register temp) {
3986   // implemented elsewhere
3987   ShouldNotReachHere();
3988 }
3989 
3990 
3991 void TemplateTable::prepare_invoke(int byte_no,
3992                                    Register method,  // linked method (or i-klass)
3993                                    Register index,   // itable index, MethodType, etc.
3994                                    Register recv,    // if caller wants to see it
3995                                    Register flags    // if caller wants to test it
3996                                    ) {
3997   // determine flags
3998   const Bytecodes::Code code = bytecode();
3999   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4000   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4001   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4002   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4003   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4004   const bool load_receiver       = (recv != noreg);
4005   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4006   assert(recv  == noreg || recv  == R2, "");
4007   assert(flags == noreg || flags == R3, "");
4008 
4009   // setup registers & access constant pool cache
4010   if (recv  == noreg)  recv  = R2;
4011   if (flags == noreg)  flags = R3;
4012   const Register temp = Rtemp;
4013   const Register ret_type = R1_tmp;
4014   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4015 
4016   // save 'interpreter return address'
4017   __ save_bcp();
4018 
4019   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4020 
4021   // maybe push extra argument
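       // (invokedynamic/invokehandle may carry an appendix -- a MethodType or
       // CallSite taken from the resolved references array -- which is passed as
       // a hidden trailing argument when has_appendix is set)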
4022   if (is_invokedynamic || is_invokehandle) {
4023     Label L_no_push;
4024     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4025     __ mov(temp, index);
4026     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4027     __ load_resolved_reference_at_index(index, temp);
4028     __ verify_oop(index);
4029     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4030     __ bind(L_no_push);
4031   }
4032 
4033   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4034   if (load_receiver) {
4035     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4036     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4037     __ ldr(recv, recv_addr);
4038     __ verify_oop(recv);
4039   }
4040 
4041   // compute return type
4042   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4043   // Make sure we don't need to mask flags after the above shift
4044   ConstantPoolCacheEntry::verify_tos_state_shift();
4045   // load return address
4046   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4047     __ mov_slow(temp, table);
4048     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4049   }
4050 }
4051 
4052 
4053 void TemplateTable::invokevirtual_helper(Register index,
4054                                          Register recv,
4055                                          Register flags) {
4056 
4057   const Register recv_klass = R2_tmp;
4058 
4059   assert_different_registers(index, recv, flags, Rtemp);
4060   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4061 
4062   // Test for an invoke of a final method
4063   Label notFinal;
4064   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4065 
4066   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4067 
4068   // do the call - the index is actually the method to call
4069 
4070   // It's final, need a null check here!
4071   __ null_check(recv, Rtemp);
4072 
4073   // profile this call
4074   __ profile_final_call(R0_tmp);
4075 
4076   __ jump_from_interpreted(Rmethod);
4077 
4078   __ bind(notFinal);
4079 
4080   // get receiver klass
4081   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4082   __ load_klass(recv_klass, recv);
4083 
4084   // profile this call
4085   __ profile_virtual_call(R0_tmp, recv_klass);
4086 
4087   // get target Method* & entry point
4088   const int base = in_bytes(Klass::vtable_start_offset());
4089   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
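       // The embedded vtable starts at Klass::vtable_start_offset(); with one word
       // per entry the target Method* is at
       //   recv_klass + base + index*wordSize + vtableEntry::method_offset_in_bytes()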
4090   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4091   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4092   __ jump_from_interpreted(Rmethod);
4093 }
4094 
4095 void TemplateTable::invokevirtual(int byte_no) {
4096   transition(vtos, vtos);
4097   assert(byte_no == f2_byte, "use this argument");
4098 
4099   const Register Rrecv  = R2_tmp;
4100   const Register Rflags = R3_tmp;
4101 
4102   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4103 
4104   // Rmethod: index
4105   // Rrecv:   receiver
4106   // Rflags:  flags
4107   // LR:      return address
4108 
4109   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4110 }
4111 
4112 
4113 void TemplateTable::invokespecial(int byte_no) {
4114   transition(vtos, vtos);
4115   assert(byte_no == f1_byte, "use this argument");
4116   const Register Rrecv  = R2_tmp;
4117   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4118   __ verify_oop(Rrecv);
4119   __ null_check(Rrecv, Rtemp);
4120   // do the call
4121   __ profile_call(Rrecv);
4122   __ jump_from_interpreted(Rmethod);
4123 }
4124 
4125 
4126 void TemplateTable::invokestatic(int byte_no) {
4127   transition(vtos, vtos);
4128   assert(byte_no == f1_byte, "use this argument");
4129   prepare_invoke(byte_no, Rmethod);
4130   // do the call
4131   __ profile_call(R2_tmp);
4132   __ jump_from_interpreted(Rmethod);
4133 }
4134 
4135 
4136 void TemplateTable::fast_invokevfinal(int byte_no) {
4137   transition(vtos, vtos);
4138   assert(byte_no == f2_byte, "use this argument");
4139   __ stop("fast_invokevfinal is not used on ARM");
4140 }
4141 
4142 
4143 void TemplateTable::invokeinterface(int byte_no) {
4144   transition(vtos, vtos);
4145   assert(byte_no == f1_byte, "use this argument");
4146 
4147   const Register Ritable = R1_tmp;
4148   const Register Rrecv   = R2_tmp;
4149   const Register Rinterf = R5_tmp;
4150   const Register Rindex  = R4_tmp;
4151   const Register Rflags  = R3_tmp;
4152   const Register Rklass  = R3_tmp;
4153 
4154   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4155 
4156   // Special case of invokeinterface called for virtual method of
4157   // java.lang.Object.  See cpCache.cpp for details.
4158   // This code isn't produced by javac, but could be produced by
4159   // another compliant java compiler.
4160   Label notMethod;
4161   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4162 
4163   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4164   __ bind(notMethod);
4165 
4166   // Get receiver klass into Rklass - also a null check
4167   __ load_klass(Rklass, Rrecv);
4168 
4169   Label no_such_interface;
4170 
4171   // Receiver subtype check against REFC.
4172   __ lookup_interface_method(// inputs: rec. class, interface
4173                              Rklass, Rinterf, noreg,
4174                              // outputs:  scan temp. reg1, scan temp. reg2
4175                              noreg, Ritable, Rtemp,
4176                              no_such_interface);
4177 
4178   // profile this call
4179   __ profile_virtual_call(R0_tmp, Rklass);
4180 
4181   // Get declaring interface class from method
4182   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4183   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4184   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4185 
4186   // Get itable index from method
4187   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4188   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // the small negative constant cannot be encoded as an arm32 immediate
4189   __ neg(Rindex, Rtemp);
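       // Rindex now holds itable_index_max - value, undoing the biased encoding
       // Method uses for itable indices.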
4190 
4191   __ lookup_interface_method(// inputs: rec. class, interface
4192                              Rklass, Rinterf, Rindex,
4193                              // outputs:  scan temp. reg1, scan temp. reg2
4194                              Rmethod, Ritable, Rtemp,
4195                              no_such_interface);
4196 
4197   // Rmethod: Method* to call
4198 
4199   // Check for abstract method error
4200   // Note: This should be done more efficiently via a throw_abstract_method_error
4201   //       interpreter entry point and a conditional jump to it in case of a null
4202   //       method.
4203   { Label L;
4204     __ cbnz(Rmethod, L);
4205     // throw exception
4206     // note: must restore interpreter registers to canonical
4207     //       state for exception handling to work correctly!
4208     __ restore_method();
4209     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4210     // the call_VM checks for exception, so we should never return here.
4211     __ should_not_reach_here();
4212     __ bind(L);
4213   }
4214 
4215   // do the call
4216   __ jump_from_interpreted(Rmethod);
4217 
4218   // throw exception
4219   __ bind(no_such_interface);
4220   __ restore_method();
4221   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4222   // the call_VM checks for exception, so we should never return here.
4223   __ should_not_reach_here();
4224 }
4225 
4226 void TemplateTable::invokehandle(int byte_no) {
4227   transition(vtos, vtos);
4228 
4229   // TODO-AARCH64 review register usage
4230   const Register Rrecv  = R2_tmp;
4231   const Register Rmtype = R4_tmp;
4232   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4233 
4234   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4235   __ null_check(Rrecv, Rtemp);
4236 
4237   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4238   // Rmethod: MH.invokeExact_MT method (from f2)
4239 
4240   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4241 
4242   // do the call
4243   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4244   __ mov(Rmethod, R5_method);
4245   __ jump_from_interpreted(Rmethod);
4246 }
4247 
4248 void TemplateTable::invokedynamic(int byte_no) {
4249   transition(vtos, vtos);
4250 
4251   // TODO-AARCH64 review register usage
4252   const Register Rcallsite = R4_tmp;
4253   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4254 
4255   prepare_invoke(byte_no, R5_method, Rcallsite);
4256 
4257   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4258   // Rmethod:   MH.linkToCallSite method (from f2)
4259 
4260   // Note:  Rcallsite is already pushed by prepare_invoke
4261 
4262   if (ProfileInterpreter) {
4263     __ profile_call(R2_tmp);
4264   }
4265 
4266   // do the call
4267   __ mov(Rmethod, R5_method);
4268   __ jump_from_interpreted(Rmethod);
4269 }
4270 
4271 //----------------------------------------------------------------------------------------------------
4272 // Allocation
4273 
4274 void TemplateTable::_new() {
4275   transition(vtos, atos);
4276 
4277   const Register Robj   = R0_tos;
4278   const Register Rcpool = R1_tmp;
4279   const Register Rindex = R2_tmp;
4280   const Register Rtags  = R3_tmp;
4281   const Register Rsize  = R3_tmp;
4282 
4283   Register Rklass = R4_tmp;
4284   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4285   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4286 
4287   Label slow_case;
4288   Label done;
4289   Label initialize_header;
4290   Label initialize_object;  // including clearing the fields
4291 
4292   const bool allow_shared_alloc =
4293     Universe::heap()->supports_inline_contig_alloc();
4294 
4295   // Literals
4296   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4297 
4298   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4299   __ get_cpool_and_tags(Rcpool, Rtags);
4300 
4301   // Make sure the class we're about to instantiate has been resolved.
4302   // This is done before loading the InstanceKlass to be consistent with the order
4303   // in which the ConstantPool is updated (see ConstantPool::klass_at_put)
4304   const int tags_offset = Array<u1>::base_offset_in_bytes();
4305   __ add(Rtemp, Rtags, Rindex);
4306 
4307 #ifdef AARCH64
4308   __ add(Rtemp, Rtemp, tags_offset);
4309   __ ldarb(Rtemp, Rtemp);
4310 #else
4311   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4312 
4313   // use Rklass as a scratch
4314   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4315 #endif // AARCH64
4316 
4317   // get InstanceKlass
4318   __ cmp(Rtemp, JVM_CONSTANT_Class);
4319   __ b(slow_case, ne);
4320   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4321 
4322   // make sure klass is initialized & doesn't have finalizer
4323   // make sure klass is fully initialized
4324   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4325   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4326   __ b(slow_case, ne);
4327 
4328   // get instance_size in InstanceKlass (scaled to a count of bytes)
4329   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4330 
4331   // test to see if it has a finalizer or is malformed in some way
4332   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4333   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4334 
4335   // Allocate the instance:
4336   //  If TLAB is enabled:
4337   //    Try to allocate in the TLAB.
4338   //    If fails, go to the slow path.
4339   //  Else If inline contiguous allocations are enabled:
4340   //    Try to allocate in eden.
4341   //    If fails due to heap end, go to slow path.
4342   //
4343   //  If TLAB is enabled OR inline contiguous is enabled:
4344   //    Initialize the allocation.
4345   //    Exit.
4346   //
4347   //  Go to slow path.
4348   if (UseTLAB) {
4349     const Register Rtlab_top = R1_tmp;
4350     const Register Rtlab_end = R2_tmp;
4351     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
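         // TLAB allocation is a thread-local pointer bump: no atomics are needed,
         // only a check of the new top against tlab_end before it is committed.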
4352 
4353     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4354     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4355     __ add(Rtlab_top, Robj, Rsize);
4356     __ cmp(Rtlab_top, Rtlab_end);
4357     __ b(slow_case, hi);
4358     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4359     if (ZeroTLAB) {
4360       // the fields have already been cleared
4361       __ b(initialize_header);
4362     } else {
4363       // initialize both the header and fields
4364       __ b(initialize_object);
4365     }
4366   } else {
4367     // Allocation in the shared Eden, if allowed.
4368     if (allow_shared_alloc) {
4369       const Register Rheap_top_addr = R2_tmp;
4370       const Register Rheap_top = R5_tmp;
4371       const Register Rheap_end = Rtemp;
4372       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4373 
4374       // heap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4375       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4376 
4377       Label retry;
4378       __ bind(retry);
4379 
4380 #ifdef AARCH64
4381       __ ldxr(Robj, Rheap_top_addr);
4382 #else
4383       __ ldr(Robj, Address(Rheap_top_addr));
4384 #endif // AARCH64
4385 
4386       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4387       __ add(Rheap_top, Robj, Rsize);
4388       __ cmp(Rheap_top, Rheap_end);
4389       __ b(slow_case, hi);
4390 
4391       // Update heap top atomically.
4392       // If someone beats us on the allocation, try again, otherwise continue.
4393 #ifdef AARCH64
4394       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4395       __ cbnz_w(Rtemp2, retry);
4396 #else
4397       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4398       __ b(retry, ne);
4399 #endif // AARCH64
4400 
4401       __ incr_allocated_bytes(Rsize, Rtemp);
4402     }
4403   }
4404 
4405   if (UseTLAB || allow_shared_alloc) {
4406     const Register Rzero0 = R1_tmp;
4407     const Register Rzero1 = R2_tmp;
4408     const Register Rzero_end = R5_tmp;
4409     const Register Rzero_cur = Rtemp;
4410     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4411 
4412     // The object is initialized before the header.  If the object size is
4413     // zero, go directly to the header initialization.
4414     __ bind(initialize_object);
4415     __ subs(Rsize, Rsize, sizeof(oopDesc));
4416     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4417     __ b(initialize_header, eq);
4418 
4419 #ifdef ASSERT
4420     // make sure Rsize is a multiple of 8
4421     Label L;
4422     __ tst(Rsize, 0x07);
4423     __ b(L, eq);
4424     __ stop("object size is not multiple of 8 - adjust this code");
4425     __ bind(L);
4426 #endif
4427 
4428 #ifdef AARCH64
4429     {
4430       Label loop;
4431       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4432       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4433       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4434       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4435 
4436       // Zero by 2 words per iteration.
4437       __ bind(loop);
4438       __ subs(Rsize, Rsize, 2*wordSize);
4439       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4440       __ b(loop, gt);
4441     }
4442 #else
4443     __ mov(Rzero0, 0);
4444     __ mov(Rzero1, 0);
4445     __ add(Rzero_end, Rzero_cur, Rsize);
4446 
4447     // initialize remaining object fields: Rsize was a multiple of 8
4448     { Label loop;
4449       // loop is unrolled 2 times
4450       __ bind(loop);
4451       // #1
4452       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4453       __ cmp(Rzero_cur, Rzero_end);
4454       // #2
4455       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4456       __ cmp(Rzero_cur, Rzero_end, ne);
4457       __ b(loop, ne);
4458     }
4459 #endif // AARCH64
4460 
4461     // initialize object header only.
4462     __ bind(initialize_header);
4463     if (UseBiasedLocking) {
4464       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4465     } else {
4466       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4467     }
4468     // mark
4469     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4470 
4471     // klass
4472 #ifdef AARCH64
4473     __ store_klass_gap(Robj);
4474 #endif // AARCH64
4475     __ store_klass(Rklass, Robj); // blows Rklass:
4476     Rklass = noreg;
4477 
4478     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4479     if (DTraceAllocProbes) {
4480       // Trigger dtrace event for fastpath
4481       Label Lcontinue;
4482 
4483       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4484       __ cbz(Rtemp, Lcontinue);
4485 
4486       __ push(atos);
4487       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4488       __ pop(atos);
4489 
4490       __ bind(Lcontinue);
4491     }
4492 
4493     __ b(done);
4494   } else {
4495     // jump over literals
4496     __ b(slow_case);
4497   }
4498 
4499   if (allow_shared_alloc) {
4500     __ bind_literal(Lheap_top_addr);
4501   }
4502 
4503   // slow case
4504   __ bind(slow_case);
4505   __ get_constant_pool(Rcpool);
4506   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4507   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4508 
4509   // continue
4510   __ bind(done);
4511 
4512   // StoreStore barrier required after complete initialization
4513   // (headers + content zeroing), before the object may escape.
4514   __ membar(MacroAssembler::StoreStore, R1_tmp);
4515 }
4516 
4517 
4518 void TemplateTable::newarray() {
4519   transition(itos, atos);
4520   __ ldrb(R1, at_bcp(1));
4521   __ mov(R2, R0_tos);
4522   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4523   // no explicit MacroAssembler::StoreStore barrier is needed (it is included in the runtime exit path)
4524 }
4525 
4526 
4527 void TemplateTable::anewarray() {
4528   transition(itos, atos);
4529   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4530   __ get_constant_pool(R1);
4531   __ mov(R3, R0_tos);
4532   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4533   // no explicit MacroAssembler::StoreStore barrier is needed (it is included in the runtime exit path)
4534 }
4535 
4536 
4537 void TemplateTable::arraylength() {
4538   transition(atos, itos);
4539   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4540   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4541 }
4542 
4543 
4544 void TemplateTable::checkcast() {
4545   transition(atos, atos);
4546   Label done, is_null, quicked, resolved, throw_exception;
4547 
4548   const Register Robj = R0_tos;
4549   const Register Rcpool = R2_tmp;
4550   const Register Rtags = R3_tmp;
4551   const Register Rindex = R4_tmp;
4552   const Register Rsuper = R3_tmp;
4553   const Register Rsub   = R4_tmp;
4554   const Register Rsubtype_check_tmp1 = R1_tmp;
4555   const Register Rsubtype_check_tmp2 = LR_tmp;
4556 
4557   __ cbz(Robj, is_null);
4558 
4559   // Get cpool & tags index
4560   __ get_cpool_and_tags(Rcpool, Rtags);
4561   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4562 
4563   // See if bytecode has already been quicked
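       // (a JVM_CONSTANT_Class tag means the constant pool entry has already been
       // resolved to a Klass*; otherwise call into the runtime to resolve it first)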
4564   __ add(Rtemp, Rtags, Rindex);
4565 #ifdef AARCH64
4566   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4567   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4568   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4569 #else
4570   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4571 #endif // AARCH64
4572 
4573   __ cmp(Rtemp, JVM_CONSTANT_Class);
4574 
4575 #ifndef AARCH64
4576   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4577 #endif // !AARCH64
4578 
4579   __ b(quicked, eq);
4580 
4581   __ push(atos);
4582   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4583   // vm_result_2 has metadata result
4584   __ get_vm_result_2(Rsuper, Robj);
4585   __ pop_ptr(Robj);
4586   __ b(resolved);
4587 
4588   __ bind(throw_exception);
4589   // Come here on failure of subtype check
4590   __ profile_typecheck_failed(R1_tmp);
4591   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4592   __ b(Interpreter::_throw_ClassCastException_entry);
4593 
4594   // Get superklass in Rsuper and subklass in Rsub
4595   __ bind(quicked);
4596   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4597 
4598   __ bind(resolved);
4599   __ load_klass(Rsub, Robj);
4600 
4601   // Generate subtype check. Blows both tmps and Rtemp.
4602   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4603   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4604 
4605   // Come here on success
4606 
4607   // Collect counts on whether this check-cast sees NULLs a lot or not.
4608   if (ProfileInterpreter) {
4609     __ b(done);
4610     __ bind(is_null);
4611     __ profile_null_seen(R1_tmp);
4612   } else {
4613     __ bind(is_null);   // same as 'done'
4614   }
4615   __ bind(done);
4616 }
4617 
4618 
4619 void TemplateTable::instanceof() {
4620   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4621   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4622 
4623   transition(atos, itos);
4624   Label done, is_null, not_subtype, quicked, resolved;
4625 
4626   const Register Robj = R0_tos;
4627   const Register Rcpool = R2_tmp;
4628   const Register Rtags = R3_tmp;
4629   const Register Rindex = R4_tmp;
4630   const Register Rsuper = R3_tmp;
4631   const Register Rsub   = R4_tmp;
4632   const Register Rsubtype_check_tmp1 = R0_tmp;
4633   const Register Rsubtype_check_tmp2 = R1_tmp;
4634 
4635   __ cbz(Robj, is_null);
4636 
4637   __ load_klass(Rsub, Robj);
4638 
4639   // Get cpool & tags index
4640   __ get_cpool_and_tags(Rcpool, Rtags);
4641   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4642 
4643   // See if bytecode has already been quicked
4644   __ add(Rtemp, Rtags, Rindex);
4645 #ifdef AARCH64
4646   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4647   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4648   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4649 #else
4650   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4651 #endif // AARCH64
4652   __ cmp(Rtemp, JVM_CONSTANT_Class);
4653 
4654 #ifndef AARCH64
4655   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4656 #endif // !AARCH64
4657 
4658   __ b(quicked, eq);
4659 
4660   __ push(atos);
4661   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4662   // vm_result_2 has metadata result
4663   __ get_vm_result_2(Rsuper, Robj);
4664   __ pop_ptr(Robj);
4665   __ b(resolved);
4666 
4667   // Get superklass in Rsuper and subklass in Rsub
4668   __ bind(quicked);
4669   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4670 
4671   __ bind(resolved);
4672   __ load_klass(Rsub, Robj);
4673 
4674   // Generate subtype check. Blows both tmps and Rtemp.
4675   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4676 
4677   // Come here on success
4678   __ mov(R0_tos, 1);
4679   __ b(done);
4680 
4681   __ bind(not_subtype);
4682   // Come here on failure
4683   __ profile_typecheck_failed(R1_tmp);
4684   __ mov(R0_tos, 0);
4685 
4686   // Collect counts on whether this test sees NULLs a lot or not.
4687   if (ProfileInterpreter) {
4688     __ b(done);
4689     __ bind(is_null);
4690     __ profile_null_seen(R1_tmp);
4691   } else {
4692     __ bind(is_null);   // same as 'done'
4693   }
4694   __ bind(done);
4695 }
4696 
4697 
4698 //----------------------------------------------------------------------------------------------------
4699 // Breakpoints
4700 void TemplateTable::_breakpoint() {
4701 
4702   // Note: We get here even if we are single stepping.
4703   // jbug insists on setting breakpoints at every bytecode
4704   // even if we are in single-step mode.
4705 
4706   transition(vtos, vtos);
4707 
4708   // get the unpatched byte code
4709   __ mov(R1, Rmethod);
4710   __ mov(R2, Rbcp);
4711   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4712 #ifdef AARCH64
4713   __ sxtw(Rtmp_save0, R0);
4714 #else
4715   __ mov(Rtmp_save0, R0);
4716 #endif // AARCH64
4717 
4718   // post the breakpoint event
4719   __ mov(R1, Rmethod);
4720   __ mov(R2, Rbcp);
4721   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4722 
4723   // complete the execution of original bytecode
4724   __ mov(R3_bytecode, Rtmp_save0);
4725   __ dispatch_only_normal(vtos);
4726 }
4727 
4728 
4729 //----------------------------------------------------------------------------------------------------
4730 // Exceptions
4731 
4732 void TemplateTable::athrow() {
4733   transition(atos, vtos);
4734   __ mov(Rexception_obj, R0_tos);
4735   __ null_check(Rexception_obj, Rtemp);
4736   __ b(Interpreter::throw_exception_entry());
4737 }
4738 
4739 
4740 //----------------------------------------------------------------------------------------------------
4741 // Synchronization
4742 //
4743 // Note: monitorenter & exit are symmetric routines, which is reflected
4744 //       in the assembly code structure as well
4745 //
4746 // Stack layout:
4747 //
4748 // [expressions  ] <--- Rstack_top        = expression stack top
4749 // ..
4750 // [expressions  ]
4751 // [monitor entry] <--- monitor block top = expression stack bot
4752 // ..
4753 // [monitor entry]
4754 // [frame data   ] <--- monitor block bot
4755 // ...
4756 // [saved FP     ] <--- FP
4757 
4758 
4759 void TemplateTable::monitorenter() {
4760   transition(atos, vtos);
4761 
4762   const Register Robj = R0_tos;
4763   const Register Rentry = R1_tmp;
4764 
4765   // check for NULL object
4766   __ null_check(Robj, Rtemp);
4767 
4768   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4769   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4770   Label allocate_monitor, allocated;
4771 
4772   // initialize entry pointer
4773   __ mov(Rentry, 0);                             // points to free slot or NULL
4774 
4775   // find a free slot in the monitor block (result in Rentry)
4776   { Label loop, exit;
4777     const Register Rcur = R2_tmp;
4778     const Register Rcur_obj = Rtemp;
4779     const Register Rbottom = R3_tmp;
4780     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4781 
4782     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4783                                  // points to current entry, starting with top-most entry
4784     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4785                                  // points to word before bottom of monitor block
4786 
4787     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4788 #ifndef AARCH64
4789     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4790                                                  // prefetch monitor's object for the first iteration
4791 #endif // !AARCH64
4792     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4793 
4794     __ bind(loop);
4795 #ifdef AARCH64
4796     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4797 #endif // AARCH64
4798     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4799     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4800 
4801     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4802     __ b(exit, eq);                              // if same object then stop searching
4803 
4804     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4805 
4806     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4807 #ifndef AARCH64
4808     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4809                                                  // prefetch monitor's object for the next iteration
4810 #endif // !AARCH64
4811     __ b(loop, ne);                              // if not at bottom then check this entry
4812     __ bind(exit);
4813   }
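  // The search above is a linear scan over the monitor block, roughly equivalent
  // to this C-like sketch (illustration only; 'top' and 'bottom' stand for the
  // monitor block top and bottom loaded into Rcur and Rbottom):
  //
  //   BasicObjectLock* free_slot = NULL;
  //   for (BasicObjectLock* cur = top; cur != bottom; cur++) {
  //     if (cur->obj() == NULL) free_slot = cur;   // remember an unused entry
  //     if (cur->obj() == obj)  break;             // stop at an entry already holding this object
  //   }
  //   // free_slot != NULL: reuse it (branch to 'allocated');
  //   // free_slot == NULL: fall through and allocate a new entry below.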
4814 
4815   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4816 
4817   __ bind(allocate_monitor);
4818 
4819   // allocate one if there's no free slot
4820   { Label loop;
4821     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4822 
4823     // 1. compute new pointers
4824 
4825 #ifdef AARCH64
4826     __ check_extended_sp(Rtemp);
4827     __ sub(SP, SP, entry_size);                  // adjust extended SP
4828     __ mov(Rtemp, SP);
4829     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4830 #endif // AARCH64
4831 
4832     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4833                                                  // old monitor block top / expression stack bottom
4834 
4835     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4836     __ check_stack_top_on_expansion();
4837 
4838     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4839 
4840     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4841 
4842     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4843                                                  // set new monitor block top
4844 
4845     // 2. move expression stack contents
4846 
4847     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4848 #ifndef AARCH64
4849     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4850 #endif // !AARCH64
4851     __ b(allocated, eq);
4852 
4853     __ bind(loop);
4854 #ifdef AARCH64
4855     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4856 #endif // AARCH64
4857     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4858                                                             // and advance to next word
4859     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4860 #ifndef AARCH64
4861     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4862 #endif // !AARCH64
4863     __ b(loop, ne);                                         // if not at bottom then copy next word
4864   }
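  // Conceptually, the block above grows the monitor area by one entry and slides
  // the live expression stack down by entry_size bytes, e.g. (sketch only, with
  // intptr_t-sized stack words):
  //
  //   new_stack_top   = old_stack_top   - entry_size;
  //   new_monitor_top = old_monitor_top - entry_size;   // expression stack bottom moves too
  //   for (intptr_t* p = (intptr_t*)new_stack_top; p != (intptr_t*)new_monitor_top; p++) {
  //     *p = *(intptr_t*)((char*)p + entry_size);       // copy each stack word to its new slot
  //   }
  //   // the freed entry_size bytes at the new monitor block top become the new BasicObjectLock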
4865 
4866   // call run-time routine
4867 
4868   // Rentry: points to monitor entry
4869   __ bind(allocated);
4870 
4871   // Increment bcp to point to the next bytecode, so exception handling for asynchronous exceptions works correctly.
4872   // The object has already been popped from the stack, so the expression stack looks correct.
4873   __ add(Rbcp, Rbcp, 1);
4874 
4875   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4876   __ lock_object(Rentry);
4877 
4878   // check to make sure this monitor doesn't cause stack overflow after locking
4879   __ save_bcp();  // in case of exception
4880   __ arm_stack_overflow_check(0, Rtemp);
4881 
4882   // The bcp has already been incremented. Just need to dispatch to next instruction.
4883   __ dispatch_next(vtos);
4884 }
4885 
4886 
4887 void TemplateTable::monitorexit() {
4888   transition(atos, vtos);
4889 
4890   const Register Robj = R0_tos;
4891   const Register Rcur = R1_tmp;
4892   const Register Rbottom = R2_tmp;
4893   const Register Rcur_obj = Rtemp;
4894 
4895   // check for NULL object
4896   __ null_check(Robj, Rtemp);
4897 
4898   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4899   Label found, throw_exception;
4900 
4901   // find matching slot
4902   { Label loop;
4903     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4904 
4905     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4906                                  // points to current entry, starting with top-most entry
4907     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4908                                  // points to word before bottom of monitor block
4909 
4910     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4911 #ifndef AARCH64
4912     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4913                                                  // prefetch monitor's object for the first iteration
4914 #endif // !AARCH64
4915     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4916 
4917     __ bind(loop);
4918 #ifdef AARCH64
4919     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4920 #endif // AARCH64
4921     // check if current entry is for same object
4922     __ cmp(Rcur_obj, Robj);
4923     __ b(found, eq);                             // if same object then stop searching
4924     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4925     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4926 #ifndef AARCH64
4927     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4928 #endif // !AARCH64
4929     __ b (loop, ne);                             // if not at bottom then check this entry
4930   }
4931 
4932   // Error handling: unlocking was not block-structured
4933   __ bind(throw_exception);
4934   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4935   __ should_not_reach_here();
4936 
4937   // call run-time routine
4938   // Rcur: points to monitor entry
4939   __ bind(found);
4940   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4941   __ unlock_object(Rcur);
4942   __ pop_ptr(Robj);                              // discard object
4943 }
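// In C-like terms, monitorexit above amounts to (illustration only; the helper
// names below are placeholders for the search loop and the unlock_object() call):
//
//   BasicObjectLock* entry = find_monitor_entry_for(obj);   // linear scan, as in monitorenter
//   if (entry == NULL) {
//     InterpreterRuntime::throw_illegal_monitor_state_exception(thread);  // not block-structured
//   } else {
//     unlock_object(entry);   // unlock and discard the object reference
//   }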
4944 
4945 
4946 //----------------------------------------------------------------------------------------------------
4947 // Wide instructions
4948 
4949 void TemplateTable::wide() {
4950   transition(vtos, vtos);
4951   __ ldrb(R3_bytecode, at_bcp(1));
4952 
4953   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4954   __ ldr_literal(Rtemp, Ltable);
4955   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4956 
4957   __ nop(); // to avoid filling CPU pipeline with invalid instructions
4958   __ nop();
4959   __ bind_literal(Ltable);
4960 }
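// In effect, wide() re-dispatches through the table of wide entry points
// (sketch only; Interpreter::_wentry_point is the real table used above,
// jump_to() is a hypothetical stand-in for the indirect jump):
//
//   unsigned bc = *(bcp + 1);                        // bytecode following the 'wide' prefix
//   address target = Interpreter::_wentry_point[bc]; // wide-operand variant of that bytecode
//   jump_to(target);                                 // indirect jump, does not return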
4961 
4962 
4963 //----------------------------------------------------------------------------------------------------
4964 // Multi arrays
4965 
4966 void TemplateTable::multianewarray() {
4967   transition(vtos, atos);
4968   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4969 
4970   // last dim is on top of stack; we want address of first one:
4971   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
4972   // (the final wordSize is subtracted so R1 points at the first dimension itself, i.e. the beginning of the dimension array).
4973   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4974   __ sub(R1, Rtemp, wordSize);
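  // Worked example (assuming a 32-bit build where stackElementSize == wordSize == 4):
  // with ndims == 3 the dimension words sit at Rstack_top, Rstack_top+4 and
  // Rstack_top+8 (last dimension on top of the stack), so
  //   R1 = Rstack_top + 3*4 - 4 = Rstack_top + 8
  // which is the address of the first dimension, i.e. the start of the dimension
  // array passed to InterpreterRuntime::multianewarray.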
4975 
4976   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4977   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4978   // A MacroAssembler::StoreStore barrier is not needed here (it is included in the runtime exit path)
4979 }