1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  // No ARM-specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 #ifndef AARCH64
  62 static inline Address haddress(int n)            { return iaddress(n + 0); }
  63 #endif // !AARCH64
  64 
  65 static inline Address faddress(int n)            { return iaddress(n); }
  66 static inline Address daddress(int n)            { return laddress(n); }
  67 static inline Address aaddress(int n)            { return iaddress(n); }
  68 
  69 
  70 void TemplateTable::get_local_base_addr(Register r, Register index) {
  71   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  72 }
  73 
  74 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  75 #ifdef AARCH64
  76   get_local_base_addr(scratch, index);
  77   return Address(scratch);
  78 #else
  79   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  80 #endif // AARCH64
  81 }
  82 
  83 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  84   return load_iaddress(index, scratch);
  85 }
  86 
  87 Address TemplateTable::load_faddress(Register index, Register scratch) {
  88 #ifdef __SOFTFP__
  89   return load_iaddress(index, scratch);
  90 #else
  91   get_local_base_addr(scratch, index);
  92   return Address(scratch);
  93 #endif // __SOFTFP__
  94 }
  95 
  96 Address TemplateTable::load_daddress(Register index, Register scratch) {
  97   get_local_base_addr(scratch, index);
  98   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  99 }
 100 
// At top of Java expression stack, which may be different from SP
// (it isn't different for category 1 values).
 103 static inline Address at_tos() {
 104   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 105 }
 106 
 107 static inline Address at_tos_p1() {
 108   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 109 }
 110 
 111 static inline Address at_tos_p2() {
 112   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 113 }
 114 
 115 
 116 // 32-bit ARM:
 117 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 118 // separate ldr instructions (supports nonadjacent values).
 119 // Used for longs in all modes, and for doubles in SOFTFP mode.
 120 //
 121 // AArch64: loads long local into R0_tos.
 122 //
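// On 32-bit ARM a category-2 local occupies two slots: the low word is read
// from local_offset_in_bytes(1) and the high word from local_offset_in_bytes(0),
// mirroring the stores in store_category2_local() below.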
 123 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 124   const Register Rlocal_base = tmp;
 125   assert_different_registers(Rlocal_index, tmp);
 126 
 127   get_local_base_addr(Rlocal_base, Rlocal_index);
 128 #ifdef AARCH64
 129   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130 #else
 131   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 132   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 133 #endif // AARCH64
 134 }
 135 
 136 
 137 // 32-bit ARM:
 138 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 139 // separate str instructions (supports nonadjacent values).
 140 // Used for longs in all modes, and for doubles in SOFTFP mode
 141 //
 142 // AArch64: stores R0_tos to long local.
 143 //
 144 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 145   const Register Rlocal_base = tmp;
 146   assert_different_registers(Rlocal_index, tmp);
 147 
 148   get_local_base_addr(Rlocal_base, Rlocal_index);
 149 #ifdef AARCH64
 150   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151 #else
 152   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 153   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 154 #endif // AARCH64
 155 }
 156 
 157 // Returns address of Java array element using temp register as address base.
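// For illustration: with elemType == T_INT (logElemSize == 2) this computes
//   temp = array + (index << 2)
// and returns Address(temp, arrayOopDesc::base_offset_in_bytes(T_INT)),
// i.e. the element lives at the int-array base offset plus index * 4.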
 158 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 159   int logElemSize = exact_log2(type2aelembytes(elemType));
 160   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 161   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 162 }
 163 
 164 //----------------------------------------------------------------------------------------------------
 165 // Condition conversion
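// convNegCond returns the assembler condition that is the logical negation of
// the given bytecode comparison (e.g. TemplateTable::equal maps to ne), so a
// branch on the returned condition falls through when the comparison holds.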
 166 AsmCondition convNegCond(TemplateTable::Condition cc) {
 167   switch (cc) {
 168     case TemplateTable::equal        : return ne;
 169     case TemplateTable::not_equal    : return eq;
 170     case TemplateTable::less         : return ge;
 171     case TemplateTable::less_equal   : return gt;
 172     case TemplateTable::greater      : return le;
 173     case TemplateTable::greater_equal: return lt;
 174   }
 175   ShouldNotReachHere();
 176   return nv;
 177 }
 178 
 179 //----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines
 181 
 182 // Store an oop (or NULL) at the address described by obj.
 183 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
 184 // Also destroys new_val and obj.base().
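// The store itself is delegated to store_heap_oop/store_heap_oop_null, which
// let the active BarrierSetAssembler apply the GC barriers implied by the
// decorators.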
 185 static void do_oop_store(InterpreterMacroAssembler* _masm,
 186                          Address obj,
 187                          Register new_val,
 188                          Register tmp1,
 189                          Register tmp2,
 190                          Register tmp3,
 191                          bool is_null,
 192                          DecoratorSet decorators = 0) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   if (is_null) {
 196     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 197   } else {
 198     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 199   }
 200 }
 201 
 202 static void do_oop_load(InterpreterMacroAssembler* _masm,
 203                         Register dst,
 204                         Address obj,
 205                         DecoratorSet decorators = 0) {
 206   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 207 }
 208 
 209 Address TemplateTable::at_bcp(int offset) {
 210   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 211   return Address(Rbcp, offset);
 212 }
 213 
 214 
 215 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 216 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 217                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 218                                    int byte_no) {
 219   assert_different_registers(bc_reg, temp_reg);
 220   if (!RewriteBytecodes)  return;
 221   Label L_patch_done;
 222 
 223   switch (bc) {
 224   case Bytecodes::_fast_aputfield:
 225   case Bytecodes::_fast_bputfield:
 226   case Bytecodes::_fast_zputfield:
 227   case Bytecodes::_fast_cputfield:
 228   case Bytecodes::_fast_dputfield:
 229   case Bytecodes::_fast_fputfield:
 230   case Bytecodes::_fast_iputfield:
 231   case Bytecodes::_fast_lputfield:
 232   case Bytecodes::_fast_sputfield:
 233     {
 234       // We skip bytecode quickening for putfield instructions when
 235       // the put_code written to the constant pool cache is zero.
 236       // This is required so that every execution of this instruction
 237       // calls out to InterpreterRuntime::resolve_get_put to do
 238       // additional, required work.
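      // If the cached put_code is still zero, the cbz below branches to
      // L_patch_done and the original bytecode is left unpatched, so
      // resolution is retried on the next execution.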
 239       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 240       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 241       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 242       __ mov(bc_reg, bc);
 243       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 244     }
 245     break;
 246   default:
 247     assert(byte_no == -1, "sanity");
 248     // the pair bytecodes have already done the load.
 249     if (load_bc_into_bc_reg) {
 250       __ mov(bc_reg, bc);
 251     }
 252   }
 253 
 254   if (__ can_post_breakpoint()) {
 255     Label L_fast_patch;
 256     // if a breakpoint is present we can't rewrite the stream directly
 257     __ ldrb(temp_reg, at_bcp(0));
 258     __ cmp(temp_reg, Bytecodes::_breakpoint);
 259     __ b(L_fast_patch, ne);
 260     if (bc_reg != R3) {
 261       __ mov(R3, bc_reg);
 262     }
 263     __ mov(R1, Rmethod);
 264     __ mov(R2, Rbcp);
 265     // Let breakpoint table handling rewrite to quicker bytecode
 266     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 267     __ b(L_patch_done);
 268     __ bind(L_fast_patch);
 269   }
 270 
 271 #ifdef ASSERT
 272   Label L_okay;
 273   __ ldrb(temp_reg, at_bcp(0));
 274   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 275   __ b(L_okay, eq);
 276   __ cmp(temp_reg, bc_reg);
 277   __ b(L_okay, eq);
 278   __ stop("patching the wrong bytecode");
 279   __ bind(L_okay);
 280 #endif
 281 
 282   // patch bytecode
 283   __ strb(bc_reg, at_bcp(0));
 284   __ bind(L_patch_done);
 285 }
 286 
 287 //----------------------------------------------------------------------------------------------------
 288 // Individual instructions
 289 
 290 void TemplateTable::nop() {
 291   transition(vtos, vtos);
 292   // nothing to do
 293 }
 294 
 295 void TemplateTable::shouldnotreachhere() {
 296   transition(vtos, vtos);
 297   __ stop("shouldnotreachhere bytecode");
 298 }
 299 
 300 
 301 
 302 void TemplateTable::aconst_null() {
 303   transition(vtos, atos);
 304   __ mov(R0_tos, 0);
 305 }
 306 
 307 
 308 void TemplateTable::iconst(int value) {
 309   transition(vtos, itos);
 310   __ mov_slow(R0_tos, value);
 311 }
 312 
 313 
 314 void TemplateTable::lconst(int value) {
 315   transition(vtos, ltos);
 316   assert((value == 0) || (value == 1), "unexpected long constant");
 317   __ mov(R0_tos, value);
 318 #ifndef AARCH64
 319   __ mov(R1_tos_hi, 0);
 320 #endif // !AARCH64
 321 }
 322 
 323 
 324 void TemplateTable::fconst(int value) {
 325   transition(vtos, ftos);
 326 #ifdef AARCH64
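  // fmov_s takes an 8-bit FP immediate encoding: 0x70 expands to 1.0f and
  // 0x00 expands to 2.0f; fconst_0 uses a move from the zero register instead.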
 327   switch(value) {
 328   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 329   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 330   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 331   default:  ShouldNotReachHere();      break;
 332   }
 333 #else
 334   const int zero = 0;         // 0.0f
 335   const int one = 0x3f800000; // 1.0f
 336   const int two = 0x40000000; // 2.0f
 337 
 338   switch(value) {
 339   case 0:   __ mov(R0_tos, zero);   break;
 340   case 1:   __ mov(R0_tos, one);    break;
 341   case 2:   __ mov(R0_tos, two);    break;
 342   default:  ShouldNotReachHere();   break;
 343   }
 344 
 345 #ifndef __SOFTFP__
 346   __ fmsr(S0_tos, R0_tos);
 347 #endif // !__SOFTFP__
 348 #endif // AARCH64
 349 }
 350 
 351 
 352 void TemplateTable::dconst(int value) {
 353   transition(vtos, dtos);
 354 #ifdef AARCH64
 355   switch(value) {
 356   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 357   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 358   default:  ShouldNotReachHere();      break;
 359   }
 360 #else
 361   const int one_lo = 0;            // low part of 1.0
 362   const int one_hi = 0x3ff00000;   // high part of 1.0
 363 
 364   if (value == 0) {
 365 #ifdef __SOFTFP__
 366     __ mov(R0_tos_lo, 0);
 367     __ mov(R1_tos_hi, 0);
 368 #else
 369     __ mov(R0_tmp, 0);
 370     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 371 #endif // __SOFTFP__
 372   } else if (value == 1) {
 373     __ mov(R0_tos_lo, one_lo);
 374     __ mov_slow(R1_tos_hi, one_hi);
 375 #ifndef __SOFTFP__
 376     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 377 #endif // !__SOFTFP__
 378   } else {
 379     ShouldNotReachHere();
 380   }
 381 #endif // AARCH64
 382 }
 383 
 384 
 385 void TemplateTable::bipush() {
 386   transition(vtos, itos);
 387   __ ldrsb(R0_tos, at_bcp(1));
 388 }
 389 
 390 
 391 void TemplateTable::sipush() {
 392   transition(vtos, itos);
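  // Assemble the signed 16-bit operand: the first operand byte is sign-extended,
  // shifted left by 8 bits, and OR'ed with the zero-extended second byte.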
 393   __ ldrsb(R0_tmp, at_bcp(1));
 394   __ ldrb(R1_tmp, at_bcp(2));
 395   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 396 }
 397 
 398 
 399 void TemplateTable::ldc(bool wide) {
 400   transition(vtos, vtos);
 401   Label fastCase, Condy, Done;
 402 
 403   const Register Rindex = R1_tmp;
 404   const Register Rcpool = R2_tmp;
 405   const Register Rtags  = R3_tmp;
 406   const Register RtagType = R3_tmp;
 407 
 408   if (wide) {
 409     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 410   } else {
 411     __ ldrb(Rindex, at_bcp(1));
 412   }
 413   __ get_cpool_and_tags(Rcpool, Rtags);
 414 
 415   const int base_offset = ConstantPool::header_size() * wordSize;
 416   const int tags_offset = Array<u1>::base_offset_in_bytes();
 417 
 418   // get const type
 419   __ add(Rtemp, Rtags, tags_offset);
 420 #ifdef AARCH64
 421   __ add(Rtemp, Rtemp, Rindex);
  __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether the control dependency is enough
 423 #else
 424   __ ldrb(RtagType, Address(Rtemp, Rindex));
 425   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 426 #endif // AARCH64
 427 
 428   // unresolved class - get the resolved class
 429   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 430 
 431   // unresolved class in error (resolution failed) - call into runtime
 432   // so that the same error from first resolution attempt is thrown.
 433 #ifdef AARCH64
 434   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 435   __ cond_cmp(RtagType, Rtemp, ne);
 436 #else
 437   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 438 #endif // AARCH64
 439 
 440   // resolved class - need to call vm to get java mirror of the class
 441   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 442 
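  // After the chained cond_cmp's above, "eq" means the tag is one of
  // UnresolvedClass, UnresolvedClassInError or Class, all of which need the
  // runtime call below; anything else goes to the fast case.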
 443   __ b(fastCase, ne);
 444 
 445   // slow case - call runtime
 446   __ mov(R1, wide);
 447   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 448   __ push(atos);
 449   __ b(Done);
 450 
 451   // int, float, String
 452   __ bind(fastCase);
 453 
 454   __ cmp(RtagType, JVM_CONSTANT_Integer);
 455   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 456   __ b(Condy, ne);
 457 
 458   // itos, ftos
 459   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 460   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 461 
  // floats and ints are placed on the stack in the same way, so
  // we can use push(itos) to transfer the float value without using VFP
 464   __ push(itos);
 465   __ b(Done);
 466 
 467   __ bind(Condy);
 468   condy_helper(Done);
 469 
 470   __ bind(Done);
 471 }
 472 
 473 // Fast path for caching oop constants.
 474 void TemplateTable::fast_aldc(bool wide) {
 475   transition(vtos, atos);
 476   int index_size = wide ? sizeof(u2) : sizeof(u1);
 477   Label resolved;
 478 
 479   // We are resolved if the resolved reference cache entry contains a
 480   // non-null object (CallSite, etc.)
 481   assert_different_registers(R0_tos, R2_tmp);
 482   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 483   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 484   __ cbnz(R0_tos, resolved);
 485 
 486   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 487 
 488   // first time invocation - must resolve first
 489   __ mov(R1, (int)bytecode());
 490   __ call_VM(R0_tos, entry, R1);
 491   __ bind(resolved);
 492 
 493   { // Check for the null sentinel.
 494     // If we just called the VM, that already did the mapping for us,
 495     // but it's harmless to retry.
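    // A resolved NULL constant is represented by the null sentinel object in
    // the resolved-references cache; translate the sentinel back into a real
    // NULL reference here.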
 496     Label notNull;
 497     Register result = R0;
 498     Register tmp = R1;
 499     Register rarg = R2;
 500 
 501     // Stash null_sentinel address to get its value later
 502     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 503     __ ldr(tmp, Address(rarg));
 504     __ cmp(result, tmp);
 505     __ b(notNull, ne);
 506     __ mov(result, 0);  // NULL object reference
 507     __ bind(notNull);
 508   }
 509 
 510   if (VerifyOops) {
 511     __ verify_oop(R0_tos);
 512   }
 513 }
 514 
 515 void TemplateTable::ldc2_w() {
 516   transition(vtos, vtos);
 517   const Register Rtags  = R2_tmp;
 518   const Register Rindex = R3_tmp;
 519   const Register Rcpool = R4_tmp;
 520   const Register Rbase  = R5_tmp;
 521 
 522   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 523 
 524   __ get_cpool_and_tags(Rcpool, Rtags);
 525   const int base_offset = ConstantPool::header_size() * wordSize;
 526   const int tags_offset = Array<u1>::base_offset_in_bytes();
 527 
 528   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 529 
  // get type from tags; this is needed on both hard-float and soft-float
  // builds, since the Long/Condy dispatch below reads the tag from Rtemp
  __ add(Rtemp, Rtags, tags_offset);
  __ ldrb(Rtemp, Address(Rtemp, Rindex));

  Label Condy, exit;
#ifdef __ABI_HARD__
  Label Long;
  __ cmp(Rtemp, JVM_CONSTANT_Double);
  __ b(Long, ne);
  __ ldr_double(D0_tos, Address(Rbase, base_offset));

  __ push(dtos);
  __ b(exit);
  __ bind(Long);
#endif
 544 
 545   __ cmp(Rtemp, JVM_CONSTANT_Long);
 546   __ b(Condy, ne);
 547 #ifdef AARCH64
 548   __ ldr(R0_tos, Address(Rbase, base_offset));
 549 #else
 550   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 551   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 552 #endif // AARCH64
 553   __ push(ltos);
 554   __ b(exit);
 555 
 556   __ bind(Condy);
 557   condy_helper(exit);
 558 
 559   __ bind(exit);
 560 }
 561 
 562 
 563 void TemplateTable::condy_helper(Label& Done)
 564 {
 565   Register obj   = R0_tmp;
 566   Register rtmp  = R1_tmp;
 567   Register flags = R2_tmp;
 568   Register off   = R3_tmp;
 569 
 570   __ mov(rtmp, (int) bytecode());
 571   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
 572   __ get_vm_result_2(flags, rtmp);
 573 
 574   // VMr = obj = base address to find primitive value to push
 575   // VMr2 = flags = (tos, off) using format of CPCE::_flags
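  // The low field_index_bits bits of flags hold the offset of the value
  // relative to obj; the bits at tos_state_shift encode the TosState that
  // selects the load/push sequence below.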
 576   __ mov(off, flags);
 577 
 578 #ifdef AARCH64
 579   __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
 580 #else
 581   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 582   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 583 #endif
 584 
 585   const Address field(obj, off);
 586 
 587   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
 588   // Make sure we don't need to mask flags after the above shift
 589   ConstantPoolCacheEntry::verify_tos_state_shift();
 590 
 591   switch (bytecode()) {
 592     case Bytecodes::_ldc:
 593     case Bytecodes::_ldc_w:
 594       {
 595         // tos in (itos, ftos, stos, btos, ctos, ztos)
 596         Label notIntFloat, notShort, notByte, notChar, notBool;
 597         __ cmp(flags, itos);
 598         __ cond_cmp(flags, ftos, ne);
 599         __ b(notIntFloat, ne);
 600         __ ldr(R0_tos, field);
 601         __ push(itos);
 602         __ b(Done);
 603 
 604         __ bind(notIntFloat);
 605         __ cmp(flags, stos);
 606         __ b(notShort, ne);
 607         __ ldrsh(R0_tos, field);
 608         __ push(stos);
 609         __ b(Done);
 610 
 611         __ bind(notShort);
 612         __ cmp(flags, btos);
 613         __ b(notByte, ne);
 614         __ ldrsb(R0_tos, field);
 615         __ push(btos);
 616         __ b(Done);
 617 
 618         __ bind(notByte);
 619         __ cmp(flags, ctos);
 620         __ b(notChar, ne);
 621         __ ldrh(R0_tos, field);
 622         __ push(ctos);
 623         __ b(Done);
 624 
 625         __ bind(notChar);
 626         __ cmp(flags, ztos);
 627         __ b(notBool, ne);
 628         __ ldrsb(R0_tos, field);
 629         __ push(ztos);
 630         __ b(Done);
 631 
 632         __ bind(notBool);
 633         break;
 634       }
 635 
 636     case Bytecodes::_ldc2_w:
 637       {
 638         Label notLongDouble;
 639         __ cmp(flags, ltos);
 640         __ cond_cmp(flags, dtos, ne);
 641         __ b(notLongDouble, ne);
 642 
 643 #ifdef AARCH64
 644         __ ldr(R0_tos, field);
 645 #else
 646         __ add(rtmp, obj, wordSize);
 647         __ ldr(R0_tos_lo, Address(obj, off));
 648         __ ldr(R1_tos_hi, Address(rtmp, off));
 649 #endif
 650         __ push(ltos);
 651         __ b(Done);
 652 
 653         __ bind(notLongDouble);
 654 
 655         break;
 656       }
 657 
 658     default:
 659       ShouldNotReachHere();
 660     }
 661 
 662     __ stop("bad ldc/condy");
 663 }
 664 
 665 
 666 void TemplateTable::locals_index(Register reg, int offset) {
 667   __ ldrb(reg, at_bcp(offset));
 668 }
 669 
 670 void TemplateTable::iload() {
 671   iload_internal();
 672 }
 673 
 674 void TemplateTable::nofast_iload() {
 675   iload_internal(may_not_rewrite);
 676 }
 677 
 678 void TemplateTable::iload_internal(RewriteControl rc) {
 679   transition(vtos, itos);
 680 
 681   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 682     Label rewrite, done;
 683     const Register next_bytecode = R1_tmp;
 684     const Register target_bytecode = R2_tmp;
 685 
 686     // get next byte
 687     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
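    // Rewrite targets: (iload, fast_iload) -> fast_iload2,
    // (iload, caload) -> fast_icaload; any other following bytecode
    // rewrites this iload to fast_iload.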
 692     __ cmp(next_bytecode, Bytecodes::_iload);
 693     __ b(done, eq);
 694 
 695     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 696     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 697     __ b(rewrite, eq);
 698 
 699     // if _caload, rewrite to fast_icaload
 700     __ cmp(next_bytecode, Bytecodes::_caload);
 701     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 702     __ b(rewrite, eq);
 703 
 704     // rewrite so iload doesn't check again.
 705     __ mov(target_bytecode, Bytecodes::_fast_iload);
 706 
 707     // rewrite
 708     // R2: fast bytecode
 709     __ bind(rewrite);
 710     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 711     __ bind(done);
 712   }
 713 
 714   // Get the local value into tos
 715   const Register Rlocal_index = R1_tmp;
 716   locals_index(Rlocal_index);
 717   Address local = load_iaddress(Rlocal_index, Rtemp);
 718   __ ldr_s32(R0_tos, local);
 719 }
 720 
 721 
 722 void TemplateTable::fast_iload2() {
 723   transition(vtos, itos);
 724   const Register Rlocal_index = R1_tmp;
 725 
 726   locals_index(Rlocal_index);
 727   Address local = load_iaddress(Rlocal_index, Rtemp);
 728   __ ldr_s32(R0_tos, local);
 729   __ push(itos);
 730 
 731   locals_index(Rlocal_index, 3);
 732   local = load_iaddress(Rlocal_index, Rtemp);
 733   __ ldr_s32(R0_tos, local);
 734 }
 735 
 736 void TemplateTable::fast_iload() {
 737   transition(vtos, itos);
 738   const Register Rlocal_index = R1_tmp;
 739 
 740   locals_index(Rlocal_index);
 741   Address local = load_iaddress(Rlocal_index, Rtemp);
 742   __ ldr_s32(R0_tos, local);
 743 }
 744 
 745 
 746 void TemplateTable::lload() {
 747   transition(vtos, ltos);
 748   const Register Rlocal_index = R2_tmp;
 749 
 750   locals_index(Rlocal_index);
 751   load_category2_local(Rlocal_index, R3_tmp);
 752 }
 753 
 754 
 755 void TemplateTable::fload() {
 756   transition(vtos, ftos);
 757   const Register Rlocal_index = R2_tmp;
 758 
 759   // Get the local value into tos
 760   locals_index(Rlocal_index);
 761   Address local = load_faddress(Rlocal_index, Rtemp);
 762 #ifdef __SOFTFP__
 763   __ ldr(R0_tos, local);
 764 #else
 765   __ ldr_float(S0_tos, local);
 766 #endif // __SOFTFP__
 767 }
 768 
 769 
 770 void TemplateTable::dload() {
 771   transition(vtos, dtos);
 772   const Register Rlocal_index = R2_tmp;
 773 
 774   locals_index(Rlocal_index);
 775 
 776 #ifdef __SOFTFP__
 777   load_category2_local(Rlocal_index, R3_tmp);
 778 #else
 779   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 780 #endif // __SOFTFP__
 781 }
 782 
 783 
 784 void TemplateTable::aload() {
 785   transition(vtos, atos);
 786   const Register Rlocal_index = R1_tmp;
 787 
 788   locals_index(Rlocal_index);
 789   Address local = load_aaddress(Rlocal_index, Rtemp);
 790   __ ldr(R0_tos, local);
 791 }
 792 
 793 
 794 void TemplateTable::locals_index_wide(Register reg) {
 795   assert_different_registers(reg, Rtemp);
 796   __ ldrb(Rtemp, at_bcp(2));
 797   __ ldrb(reg, at_bcp(3));
 798   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 799 }
 800 
 801 
 802 void TemplateTable::wide_iload() {
 803   transition(vtos, itos);
 804   const Register Rlocal_index = R2_tmp;
 805 
 806   locals_index_wide(Rlocal_index);
 807   Address local = load_iaddress(Rlocal_index, Rtemp);
 808   __ ldr_s32(R0_tos, local);
 809 }
 810 
 811 
 812 void TemplateTable::wide_lload() {
 813   transition(vtos, ltos);
 814   const Register Rlocal_index = R2_tmp;
 815   const Register Rlocal_base = R3_tmp;
 816 
 817   locals_index_wide(Rlocal_index);
 818   load_category2_local(Rlocal_index, R3_tmp);
 819 }
 820 
 821 
 822 void TemplateTable::wide_fload() {
 823   transition(vtos, ftos);
 824   const Register Rlocal_index = R2_tmp;
 825 
 826   locals_index_wide(Rlocal_index);
 827   Address local = load_faddress(Rlocal_index, Rtemp);
 828 #ifdef __SOFTFP__
 829   __ ldr(R0_tos, local);
 830 #else
 831   __ ldr_float(S0_tos, local);
 832 #endif // __SOFTFP__
 833 }
 834 
 835 
 836 void TemplateTable::wide_dload() {
 837   transition(vtos, dtos);
 838   const Register Rlocal_index = R2_tmp;
 839 
 840   locals_index_wide(Rlocal_index);
 841 #ifdef __SOFTFP__
 842   load_category2_local(Rlocal_index, R3_tmp);
 843 #else
 844   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 845 #endif // __SOFTFP__
 846 }
 847 
 848 
 849 void TemplateTable::wide_aload() {
 850   transition(vtos, atos);
 851   const Register Rlocal_index = R2_tmp;
 852 
 853   locals_index_wide(Rlocal_index);
 854   Address local = load_aaddress(Rlocal_index, Rtemp);
 855   __ ldr(R0_tos, local);
 856 }
 857 
 858 void TemplateTable::index_check(Register array, Register index) {
 859   // Pop ptr into array
 860   __ pop_ptr(array);
 861   index_check_without_pop(array, index);
 862 }
 863 
 864 void TemplateTable::index_check_without_pop(Register array, Register index) {
 865   assert_different_registers(array, index, Rtemp);
 866   // check array
 867   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 868   // check index
 869   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 870   __ cmp_32(index, Rtemp);
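  // unsigned comparison: a negative index wraps to a large unsigned value, so
  // the "hs" (unsigned >=) branch below also catches index < 0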
 871   if (index != R4_ArrayIndexOutOfBounds_index) {
 872     // convention with generate_ArrayIndexOutOfBounds_handler()
 873     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 874   }
 875   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 876 }
 877 
 878 
 879 void TemplateTable::iaload() {
 880   transition(itos, itos);
 881   const Register Rarray = R1_tmp;
 882   const Register Rindex = R0_tos;
 883 
 884   index_check(Rarray, Rindex);
 885   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 886 }
 887 
 888 
 889 void TemplateTable::laload() {
 890   transition(itos, ltos);
 891   const Register Rarray = R1_tmp;
 892   const Register Rindex = R0_tos;
 893 
 894   index_check(Rarray, Rindex);
 895 
 896 #ifdef AARCH64
 897   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 898 #else
 899   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 900   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 901   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 902 #endif // AARCH64
 903 }
 904 
 905 
 906 void TemplateTable::faload() {
 907   transition(itos, ftos);
 908   const Register Rarray = R1_tmp;
 909   const Register Rindex = R0_tos;
 910 
 911   index_check(Rarray, Rindex);
 912 
 913   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 914 #ifdef __SOFTFP__
 915   __ ldr(R0_tos, addr);
 916 #else
 917   __ ldr_float(S0_tos, addr);
 918 #endif // __SOFTFP__
 919 }
 920 
 921 
 922 void TemplateTable::daload() {
 923   transition(itos, dtos);
 924   const Register Rarray = R1_tmp;
 925   const Register Rindex = R0_tos;
 926 
 927   index_check(Rarray, Rindex);
 928 
 929 #ifdef __SOFTFP__
 930   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 931   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 932   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 933 #else
 934   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 935 #endif // __SOFTFP__
 936 }
 937 
 938 
 939 void TemplateTable::aaload() {
 940   transition(itos, atos);
 941   const Register Rarray = R1_tmp;
 942   const Register Rindex = R0_tos;
 943 
 944   index_check(Rarray, Rindex);
 945   do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
 946 }
 947 
 948 
 949 void TemplateTable::baload() {
 950   transition(itos, itos);
 951   const Register Rarray = R1_tmp;
 952   const Register Rindex = R0_tos;
 953 
 954   index_check(Rarray, Rindex);
 955   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 956 }
 957 
 958 
 959 void TemplateTable::caload() {
 960   transition(itos, itos);
 961   const Register Rarray = R1_tmp;
 962   const Register Rindex = R0_tos;
 963 
 964   index_check(Rarray, Rindex);
 965   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 966 }
 967 
 968 
 969 // iload followed by caload frequent pair
 970 void TemplateTable::fast_icaload() {
 971   transition(vtos, itos);
 972   const Register Rlocal_index = R1_tmp;
 973   const Register Rarray = R1_tmp;
  const Register Rindex = R4_tmp; // index_check prefers index in R4
 975   assert_different_registers(Rlocal_index, Rindex);
 976   assert_different_registers(Rarray, Rindex);
 977 
 978   // load index out of locals
 979   locals_index(Rlocal_index);
 980   Address local = load_iaddress(Rlocal_index, Rtemp);
 981   __ ldr_s32(Rindex, local);
 982 
 983   // get array element
 984   index_check(Rarray, Rindex);
 985   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 986 }
 987 
 988 
 989 void TemplateTable::saload() {
 990   transition(itos, itos);
 991   const Register Rarray = R1_tmp;
 992   const Register Rindex = R0_tos;
 993 
 994   index_check(Rarray, Rindex);
 995   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 996 }
 997 
 998 
 999 void TemplateTable::iload(int n) {
1000   transition(vtos, itos);
1001   __ ldr_s32(R0_tos, iaddress(n));
1002 }
1003 
1004 
1005 void TemplateTable::lload(int n) {
1006   transition(vtos, ltos);
1007 #ifdef AARCH64
1008   __ ldr(R0_tos, laddress(n));
1009 #else
1010   __ ldr(R0_tos_lo, laddress(n));
1011   __ ldr(R1_tos_hi, haddress(n));
1012 #endif // AARCH64
1013 }
1014 
1015 
1016 void TemplateTable::fload(int n) {
1017   transition(vtos, ftos);
1018 #ifdef __SOFTFP__
1019   __ ldr(R0_tos, faddress(n));
1020 #else
1021   __ ldr_float(S0_tos, faddress(n));
1022 #endif // __SOFTFP__
1023 }
1024 
1025 
1026 void TemplateTable::dload(int n) {
1027   transition(vtos, dtos);
1028 #ifdef __SOFTFP__
1029   __ ldr(R0_tos_lo, laddress(n));
1030   __ ldr(R1_tos_hi, haddress(n));
1031 #else
1032   __ ldr_double(D0_tos, daddress(n));
1033 #endif // __SOFTFP__
1034 }
1035 
1036 
1037 void TemplateTable::aload(int n) {
1038   transition(vtos, atos);
1039   __ ldr(R0_tos, aaddress(n));
1040 }
1041 
1042 void TemplateTable::aload_0() {
1043   aload_0_internal();
1044 }
1045 
1046 void TemplateTable::nofast_aload_0() {
1047   aload_0_internal(may_not_rewrite);
1048 }
1049 
1050 void TemplateTable::aload_0_internal(RewriteControl rc) {
1051   transition(vtos, atos);
1052   // According to bytecode histograms, the pairs:
1053   //
1054   // _aload_0, _fast_igetfield
1055   // _aload_0, _fast_agetfield
1056   // _aload_0, _fast_fgetfield
1057   //
1058   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
1059   // bytecode checks if the next bytecode is either _fast_igetfield,
1060   // _fast_agetfield or _fast_fgetfield and then rewrites the
1061   // current bytecode into a pair bytecode; otherwise it rewrites the current
1062   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
1063   //
1064   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
1065   //       otherwise we may miss an opportunity for a pair.
1066   //
1067   // Also rewrite frequent pairs
1068   //   aload_0, aload_1
1069   //   aload_0, iload_1
  // These bytecodes need only a small amount of code, so they are the most
  // profitable to rewrite
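  // For example, the frequent sequence "aload_0; fast_igetfield" is rewritten
  // so that the aload_0 becomes _fast_iaccess_0, which performs the receiver
  // load and the int field access in a single template.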
1071   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1072     Label rewrite, done;
1073     const Register next_bytecode = R1_tmp;
1074     const Register target_bytecode = R2_tmp;
1075 
1076     // get next byte
1077     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1078 
1079     // if _getfield then wait with rewrite
1080     __ cmp(next_bytecode, Bytecodes::_getfield);
1081     __ b(done, eq);
1082 
1083     // if _igetfield then rewrite to _fast_iaccess_0
1084     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1085     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1086     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1087     __ b(rewrite, eq);
1088 
1089     // if _agetfield then rewrite to _fast_aaccess_0
1090     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1091     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1092     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1093     __ b(rewrite, eq);
1094 
1095     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1096     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1097     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1098 
1099     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1100 #ifdef AARCH64
1101     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1102     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1103     __ mov(target_bytecode, Rtemp, eq);
1104 #else
1105     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1106     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1107 #endif // AARCH64
1108 
1109     // rewrite
1110     __ bind(rewrite);
1111     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1112 
1113     __ bind(done);
1114   }
1115 
1116   aload(0);
1117 }
1118 
1119 void TemplateTable::istore() {
1120   transition(itos, vtos);
1121   const Register Rlocal_index = R2_tmp;
1122 
1123   locals_index(Rlocal_index);
1124   Address local = load_iaddress(Rlocal_index, Rtemp);
1125   __ str_32(R0_tos, local);
1126 }
1127 
1128 
1129 void TemplateTable::lstore() {
1130   transition(ltos, vtos);
1131   const Register Rlocal_index = R2_tmp;
1132 
1133   locals_index(Rlocal_index);
1134   store_category2_local(Rlocal_index, R3_tmp);
1135 }
1136 
1137 
1138 void TemplateTable::fstore() {
1139   transition(ftos, vtos);
1140   const Register Rlocal_index = R2_tmp;
1141 
1142   locals_index(Rlocal_index);
1143   Address local = load_faddress(Rlocal_index, Rtemp);
1144 #ifdef __SOFTFP__
1145   __ str(R0_tos, local);
1146 #else
1147   __ str_float(S0_tos, local);
1148 #endif // __SOFTFP__
1149 }
1150 
1151 
1152 void TemplateTable::dstore() {
1153   transition(dtos, vtos);
1154   const Register Rlocal_index = R2_tmp;
1155 
1156   locals_index(Rlocal_index);
1157 
1158 #ifdef __SOFTFP__
1159   store_category2_local(Rlocal_index, R3_tmp);
1160 #else
1161   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1162 #endif // __SOFTFP__
1163 }
1164 
1165 
1166 void TemplateTable::astore() {
1167   transition(vtos, vtos);
1168   const Register Rlocal_index = R1_tmp;
1169 
1170   __ pop_ptr(R0_tos);
1171   locals_index(Rlocal_index);
1172   Address local = load_aaddress(Rlocal_index, Rtemp);
1173   __ str(R0_tos, local);
1174 }
1175 
1176 
1177 void TemplateTable::wide_istore() {
1178   transition(vtos, vtos);
1179   const Register Rlocal_index = R2_tmp;
1180 
1181   __ pop_i(R0_tos);
1182   locals_index_wide(Rlocal_index);
1183   Address local = load_iaddress(Rlocal_index, Rtemp);
1184   __ str_32(R0_tos, local);
1185 }
1186 
1187 
1188 void TemplateTable::wide_lstore() {
1189   transition(vtos, vtos);
1190   const Register Rlocal_index = R2_tmp;
1191   const Register Rlocal_base = R3_tmp;
1192 
1193 #ifdef AARCH64
1194   __ pop_l(R0_tos);
1195 #else
1196   __ pop_l(R0_tos_lo, R1_tos_hi);
1197 #endif // AARCH64
1198 
1199   locals_index_wide(Rlocal_index);
1200   store_category2_local(Rlocal_index, R3_tmp);
1201 }
1202 
1203 
1204 void TemplateTable::wide_fstore() {
1205   wide_istore();
1206 }
1207 
1208 
1209 void TemplateTable::wide_dstore() {
1210   wide_lstore();
1211 }
1212 
1213 
1214 void TemplateTable::wide_astore() {
1215   transition(vtos, vtos);
1216   const Register Rlocal_index = R2_tmp;
1217 
1218   __ pop_ptr(R0_tos);
1219   locals_index_wide(Rlocal_index);
1220   Address local = load_aaddress(Rlocal_index, Rtemp);
1221   __ str(R0_tos, local);
1222 }
1223 
1224 
1225 void TemplateTable::iastore() {
1226   transition(itos, vtos);
1227   const Register Rindex = R4_tmp; // index_check prefers index in R4
1228   const Register Rarray = R3_tmp;
1229   // R0_tos: value
1230 
1231   __ pop_i(Rindex);
1232   index_check(Rarray, Rindex);
1233   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1234 }
1235 
1236 
1237 void TemplateTable::lastore() {
1238   transition(ltos, vtos);
1239   const Register Rindex = R4_tmp; // index_check prefers index in R4
1240   const Register Rarray = R3_tmp;
1241   // R0_tos_lo:R1_tos_hi: value
1242 
1243   __ pop_i(Rindex);
1244   index_check(Rarray, Rindex);
1245 
1246 #ifdef AARCH64
1247   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1248 #else
1249   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1250   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1251   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1252 #endif // AARCH64
1253 }
1254 
1255 
1256 void TemplateTable::fastore() {
1257   transition(ftos, vtos);
1258   const Register Rindex = R4_tmp; // index_check prefers index in R4
1259   const Register Rarray = R3_tmp;
1260   // S0_tos/R0_tos: value
1261 
1262   __ pop_i(Rindex);
1263   index_check(Rarray, Rindex);
1264   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1265 
1266 #ifdef __SOFTFP__
1267   __ str(R0_tos, addr);
1268 #else
1269   __ str_float(S0_tos, addr);
1270 #endif // __SOFTFP__
1271 }
1272 
1273 
1274 void TemplateTable::dastore() {
1275   transition(dtos, vtos);
1276   const Register Rindex = R4_tmp; // index_check prefers index in R4
1277   const Register Rarray = R3_tmp;
  // D0_tos / R0_tos_lo:R1_tos_hi: value
1279 
1280   __ pop_i(Rindex);
1281   index_check(Rarray, Rindex);
1282 
1283 #ifdef __SOFTFP__
1284   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1285   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1286   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1287 #else
1288   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1289 #endif // __SOFTFP__
1290 }
1291 
1292 
1293 void TemplateTable::aastore() {
1294   transition(vtos, vtos);
1295   Label is_null, throw_array_store, done;
1296 
1297   const Register Raddr_1   = R1_tmp;
1298   const Register Rvalue_2  = R2_tmp;
1299   const Register Rarray_3  = R3_tmp;
1300   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1301   const Register Rsub_5    = R5_tmp;
1302   const Register Rsuper_LR = LR_tmp;
1303 
1304   // stack: ..., array, index, value
1305   __ ldr(Rvalue_2, at_tos());     // Value
1306   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1307   __ ldr(Rarray_3, at_tos_p2());  // Array
1308 
1309   index_check_without_pop(Rarray_3, Rindex_4);
1310 
1311   // Compute the array base
1312   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1313 
1314   // do array store check - check for NULL value first
1315   __ cbz(Rvalue_2, is_null);
1316 
1317   // Load subklass
1318   __ load_klass(Rsub_5, Rvalue_2);
1319   // Load superklass
1320   __ load_klass(Rtemp, Rarray_3);
1321   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1322 
1323   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1324   // Come here on success
1325 
1326   // Store value
1327   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
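  // Raddr_1 now holds the absolute element address:
  //   array + arrayOopDesc::base_offset_in_bytes(T_OBJECT) + index * heapOopSize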
1328 
1329   // Now store using the appropriate barrier
1330   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
1331   __ b(done);
1332 
1333   __ bind(throw_array_store);
1334 
1335   // Come here on failure of subtype check
1336   __ profile_typecheck_failed(R0_tmp);
1337 
1338   // object is at TOS
1339   __ b(Interpreter::_throw_ArrayStoreException_entry);
1340 
1341   // Have a NULL in Rvalue_2, store NULL at array[index].
1342   __ bind(is_null);
1343   __ profile_null_seen(R0_tmp);
1344 
1345   // Store a NULL
1346   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
1347 
1348   // Pop stack arguments
1349   __ bind(done);
1350   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1351 }
1352 
1353 
1354 void TemplateTable::bastore() {
1355   transition(itos, vtos);
1356   const Register Rindex = R4_tmp; // index_check prefers index in R4
1357   const Register Rarray = R3_tmp;
1358   // R0_tos: value
1359 
1360   __ pop_i(Rindex);
1361   index_check(Rarray, Rindex);
1362 
1363   // Need to check whether array is boolean or byte
1364   // since both types share the bastore bytecode.
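  // The layout helper of a T_BOOLEAN array differs from that of a T_BYTE array
  // in the bit tested below; when it is set, only the low bit of the value is
  // stored, as required for booleans.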
1365   __ load_klass(Rtemp, Rarray);
1366   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1367   Label L_skip;
1368   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1369   __ b(L_skip, eq);
1370   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1371   __ bind(L_skip);
1372   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1373 }
1374 
1375 
1376 void TemplateTable::castore() {
1377   transition(itos, vtos);
1378   const Register Rindex = R4_tmp; // index_check prefers index in R4
1379   const Register Rarray = R3_tmp;
1380   // R0_tos: value
1381 
1382   __ pop_i(Rindex);
1383   index_check(Rarray, Rindex);
1384 
1385   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1386 }
1387 
1388 
1389 void TemplateTable::sastore() {
1390   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1391            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1392          "base offsets for char and short should be equal");
1393   castore();
1394 }
1395 
1396 
1397 void TemplateTable::istore(int n) {
1398   transition(itos, vtos);
1399   __ str_32(R0_tos, iaddress(n));
1400 }
1401 
1402 
1403 void TemplateTable::lstore(int n) {
1404   transition(ltos, vtos);
1405 #ifdef AARCH64
1406   __ str(R0_tos, laddress(n));
1407 #else
1408   __ str(R0_tos_lo, laddress(n));
1409   __ str(R1_tos_hi, haddress(n));
1410 #endif // AARCH64
1411 }
1412 
1413 
1414 void TemplateTable::fstore(int n) {
1415   transition(ftos, vtos);
1416 #ifdef __SOFTFP__
1417   __ str(R0_tos, faddress(n));
1418 #else
1419   __ str_float(S0_tos, faddress(n));
1420 #endif // __SOFTFP__
1421 }
1422 
1423 
1424 void TemplateTable::dstore(int n) {
1425   transition(dtos, vtos);
1426 #ifdef __SOFTFP__
1427   __ str(R0_tos_lo, laddress(n));
1428   __ str(R1_tos_hi, haddress(n));
1429 #else
1430   __ str_double(D0_tos, daddress(n));
1431 #endif // __SOFTFP__
1432 }
1433 
1434 
1435 void TemplateTable::astore(int n) {
1436   transition(vtos, vtos);
1437   __ pop_ptr(R0_tos);
1438   __ str(R0_tos, aaddress(n));
1439 }
1440 
1441 
1442 void TemplateTable::pop() {
1443   transition(vtos, vtos);
1444   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1445 }
1446 
1447 
1448 void TemplateTable::pop2() {
1449   transition(vtos, vtos);
1450   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1451 }
1452 
1453 
1454 void TemplateTable::dup() {
1455   transition(vtos, vtos);
1456   // stack: ..., a
1457   __ load_ptr(0, R0_tmp);
1458   __ push_ptr(R0_tmp);
1459   // stack: ..., a, a
1460 }
1461 
1462 
1463 void TemplateTable::dup_x1() {
1464   transition(vtos, vtos);
1465   // stack: ..., a, b
1466   __ load_ptr(0, R0_tmp);  // load b
1467   __ load_ptr(1, R2_tmp);  // load a
1468   __ store_ptr(1, R0_tmp); // store b
1469   __ store_ptr(0, R2_tmp); // store a
1470   __ push_ptr(R0_tmp);     // push b
1471   // stack: ..., b, a, b
1472 }
1473 
1474 
1475 void TemplateTable::dup_x2() {
1476   transition(vtos, vtos);
1477   // stack: ..., a, b, c
1478   __ load_ptr(0, R0_tmp);   // load c
1479   __ load_ptr(1, R2_tmp);   // load b
1480   __ load_ptr(2, R4_tmp);   // load a
1481 
1482   __ push_ptr(R0_tmp);      // push c
1483 
1484   // stack: ..., a, b, c, c
1485   __ store_ptr(1, R2_tmp);  // store b
1486   __ store_ptr(2, R4_tmp);  // store a
1487   __ store_ptr(3, R0_tmp);  // store c
1488   // stack: ..., c, a, b, c
1489 }
1490 
1491 
1492 void TemplateTable::dup2() {
1493   transition(vtos, vtos);
1494   // stack: ..., a, b
1495   __ load_ptr(1, R0_tmp);  // load a
1496   __ push_ptr(R0_tmp);     // push a
1497   __ load_ptr(1, R0_tmp);  // load b
1498   __ push_ptr(R0_tmp);     // push b
1499   // stack: ..., a, b, a, b
1500 }
1501 
1502 
1503 void TemplateTable::dup2_x1() {
1504   transition(vtos, vtos);
1505 
1506   // stack: ..., a, b, c
1507   __ load_ptr(0, R4_tmp);  // load c
1508   __ load_ptr(1, R2_tmp);  // load b
1509   __ load_ptr(2, R0_tmp);  // load a
1510 
1511   __ push_ptr(R2_tmp);     // push b
1512   __ push_ptr(R4_tmp);     // push c
1513 
1514   // stack: ..., a, b, c, b, c
1515 
1516   __ store_ptr(2, R0_tmp);  // store a
1517   __ store_ptr(3, R4_tmp);  // store c
1518   __ store_ptr(4, R2_tmp);  // store b
1519 
1520   // stack: ..., b, c, a, b, c
1521 }
1522 
1523 
1524 void TemplateTable::dup2_x2() {
1525   transition(vtos, vtos);
1526   // stack: ..., a, b, c, d
1527   __ load_ptr(0, R0_tmp);  // load d
1528   __ load_ptr(1, R2_tmp);  // load c
1529   __ push_ptr(R2_tmp);     // push c
1530   __ push_ptr(R0_tmp);     // push d
1531   // stack: ..., a, b, c, d, c, d
1532   __ load_ptr(4, R4_tmp);  // load b
1533   __ store_ptr(4, R0_tmp); // store d in b
1534   __ store_ptr(2, R4_tmp); // store b in d
1535   // stack: ..., a, d, c, b, c, d
1536   __ load_ptr(5, R4_tmp);  // load a
1537   __ store_ptr(5, R2_tmp); // store c in a
1538   __ store_ptr(3, R4_tmp); // store a in c
1539   // stack: ..., c, d, a, b, c, d
1540 }
1541 
1542 
1543 void TemplateTable::swap() {
1544   transition(vtos, vtos);
1545   // stack: ..., a, b
1546   __ load_ptr(1, R0_tmp);  // load a
1547   __ load_ptr(0, R2_tmp);  // load b
1548   __ store_ptr(0, R0_tmp); // store a in b
1549   __ store_ptr(1, R2_tmp); // store b in a
1550   // stack: ..., b, a
1551 }
1552 
1553 
1554 void TemplateTable::iop2(Operation op) {
1555   transition(itos, itos);
1556   const Register arg1 = R1_tmp;
1557   const Register arg2 = R0_tos;
1558 
1559   __ pop_i(arg1);
1560   switch (op) {
1561     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1562     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1563     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1564     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1565     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1566     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1567 #ifdef AARCH64
1568     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1569     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1570     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1571 #else
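    // JVMS: int shifts use only the low five bits of the count, hence the 0x1f mask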
1572     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1573     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1574     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1575 #endif // AARCH64
1576     default   : ShouldNotReachHere();
1577   }
1578 }
1579 
1580 
1581 void TemplateTable::lop2(Operation op) {
1582   transition(ltos, ltos);
1583 #ifdef AARCH64
1584   const Register arg1 = R1_tmp;
1585   const Register arg2 = R0_tos;
1586 
1587   __ pop_l(arg1);
1588   switch (op) {
1589     case add  : __ add (R0_tos, arg1, arg2); break;
1590     case sub  : __ sub (R0_tos, arg1, arg2); break;
1591     case _and : __ andr(R0_tos, arg1, arg2); break;
1592     case _or  : __ orr (R0_tos, arg1, arg2); break;
1593     case _xor : __ eor (R0_tos, arg1, arg2); break;
1594     default   : ShouldNotReachHere();
1595   }
1596 #else
1597   const Register arg1_lo = R2_tmp;
1598   const Register arg1_hi = R3_tmp;
1599   const Register arg2_lo = R0_tos_lo;
1600   const Register arg2_hi = R1_tos_hi;
1601 
1602   __ pop_l(arg1_lo, arg1_hi);
1603   switch (op) {
1604     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1605     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1606     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1607     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1608     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1609     default : ShouldNotReachHere();
1610   }
1611 #endif // AARCH64
1612 }
1613 
1614 
1615 void TemplateTable::idiv() {
1616   transition(itos, itos);
1617 #ifdef AARCH64
1618   const Register divisor = R0_tos;
1619   const Register dividend = R1_tmp;
1620 
1621   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1622   __ pop_i(dividend);
1623   __ sdiv_w(R0_tos, dividend, divisor);
1624 #else
1625   __ mov(R2, R0_tos);
1626   __ pop_i(R0);
1627   // R0 - dividend
1628   // R2 - divisor
1629   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1630   // R1 - result
1631   __ mov(R0_tos, R1);
1632 #endif // AARCH64
1633 }
1634 
1635 
1636 void TemplateTable::irem() {
1637   transition(itos, itos);
1638 #ifdef AARCH64
1639   const Register divisor = R0_tos;
1640   const Register dividend = R1_tmp;
1641   const Register quotient = R2_tmp;
1642 
1643   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1644   __ pop_i(dividend);
1645   __ sdiv_w(quotient, dividend, divisor);
1646   __ msub_w(R0_tos, divisor, quotient, dividend);
1647 #else
1648   __ mov(R2, R0_tos);
1649   __ pop_i(R0);
1650   // R0 - dividend
1651   // R2 - divisor
1652   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1653   // R0 - remainder
1654 #endif // AARCH64
1655 }
1656 
1657 
1658 void TemplateTable::lmul() {
1659   transition(ltos, ltos);
1660 #ifdef AARCH64
1661   const Register arg1 = R0_tos;
1662   const Register arg2 = R1_tmp;
1663 
1664   __ pop_l(arg2);
1665   __ mul(R0_tos, arg1, arg2);
1666 #else
1667   const Register arg1_lo = R0_tos_lo;
1668   const Register arg1_hi = R1_tos_hi;
1669   const Register arg2_lo = R2_tmp;
1670   const Register arg2_hi = R3_tmp;
1671 
1672   __ pop_l(arg2_lo, arg2_hi);
1673 
1674   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1675 #endif // AARCH64
1676 }
1677 
1678 
1679 void TemplateTable::ldiv() {
1680   transition(ltos, ltos);
1681 #ifdef AARCH64
1682   const Register divisor = R0_tos;
1683   const Register dividend = R1_tmp;
1684 
1685   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1686   __ pop_l(dividend);
1687   __ sdiv(R0_tos, dividend, divisor);
1688 #else
1689   const Register x_lo = R2_tmp;
1690   const Register x_hi = R3_tmp;
1691   const Register y_lo = R0_tos_lo;
1692   const Register y_hi = R1_tos_hi;
1693 
1694   __ pop_l(x_lo, x_hi);
1695 
1696   // check if y = 0
1697   __ orrs(Rtemp, y_lo, y_hi);
1698   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1699   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1700 #endif // AARCH64
1701 }
1702 
1703 
1704 void TemplateTable::lrem() {
1705   transition(ltos, ltos);
1706 #ifdef AARCH64
1707   const Register divisor = R0_tos;
1708   const Register dividend = R1_tmp;
1709   const Register quotient = R2_tmp;
1710 
1711   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1712   __ pop_l(dividend);
1713   __ sdiv(quotient, dividend, divisor);
1714   __ msub(R0_tos, divisor, quotient, dividend);
1715 #else
1716   const Register x_lo = R2_tmp;
1717   const Register x_hi = R3_tmp;
1718   const Register y_lo = R0_tos_lo;
1719   const Register y_hi = R1_tos_hi;
1720 
1721   __ pop_l(x_lo, x_hi);
1722 
1723   // check if y = 0
1724   __ orrs(Rtemp, y_lo, y_hi);
1725   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1726   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1727 #endif // AARCH64
1728 }
1729 
1730 
1731 void TemplateTable::lshl() {
1732   transition(itos, ltos);
1733 #ifdef AARCH64
1734   const Register val = R1_tmp;
1735   const Register shift_cnt = R0_tos;
1736   __ pop_l(val);
1737   __ lslv(R0_tos, val, shift_cnt);
1738 #else
1739   const Register shift_cnt = R4_tmp;
1740   const Register val_lo = R2_tmp;
1741   const Register val_hi = R3_tmp;
1742 
1743   __ pop_l(val_lo, val_hi);
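  // JVMS: long shifts use only the low six bits of the count, hence the mask with 63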
1744   __ andr(shift_cnt, R0_tos, 63);
1745   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1746 #endif // AARCH64
1747 }
1748 
1749 
1750 void TemplateTable::lshr() {
1751   transition(itos, ltos);
1752 #ifdef AARCH64
1753   const Register val = R1_tmp;
1754   const Register shift_cnt = R0_tos;
1755   __ pop_l(val);
1756   __ asrv(R0_tos, val, shift_cnt);
1757 #else
1758   const Register shift_cnt = R4_tmp;
1759   const Register val_lo = R2_tmp;
1760   const Register val_hi = R3_tmp;
1761 
1762   __ pop_l(val_lo, val_hi);
1763   __ andr(shift_cnt, R0_tos, 63);
1764   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1765 #endif // AARCH64
1766 }
1767 
1768 
1769 void TemplateTable::lushr() {
1770   transition(itos, ltos);
1771 #ifdef AARCH64
1772   const Register val = R1_tmp;
1773   const Register shift_cnt = R0_tos;
1774   __ pop_l(val);
1775   __ lsrv(R0_tos, val, shift_cnt);
1776 #else
1777   const Register shift_cnt = R4_tmp;
1778   const Register val_lo = R2_tmp;
1779   const Register val_hi = R3_tmp;
1780 
1781   __ pop_l(val_lo, val_hi);
1782   __ andr(shift_cnt, R0_tos, 63);
1783   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1784 #endif // AARCH64
1785 }
1786 
1787 
1788 void TemplateTable::fop2(Operation op) {
1789   transition(ftos, ftos);
1790 #ifdef __SOFTFP__
1791   __ mov(R1, R0_tos);
1792   __ pop_i(R0);
1793   switch (op) {
1794     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1795     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1796     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1797     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1798     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1799     default : ShouldNotReachHere();
1800   }
1801 #else
1802   const FloatRegister arg1 = S1_tmp;
1803   const FloatRegister arg2 = S0_tos;
1804 
1805   switch (op) {
1806     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1807     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1808     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1809     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1810     case rem:
1811 #ifndef __ABI_HARD__
1812       __ pop_f(arg1);
1813       __ fmrs(R0, arg1);
1814       __ fmrs(R1, arg2);
1815       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1816       __ fmsr(S0_tos, R0);
1817 #else
1818       __ mov_float(S1_reg, arg2);
1819       __ pop_f(S0);
1820       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1821 #endif // !__ABI_HARD__
1822       break;
1823     default : ShouldNotReachHere();
1824   }
1825 #endif // __SOFTFP__
1826 }
1827 
1828 
1829 void TemplateTable::dop2(Operation op) {
1830   transition(dtos, dtos);
1831 #ifdef __SOFTFP__
1832   __ mov(R2, R0_tos_lo);
1833   __ mov(R3, R1_tos_hi);
1834   __ pop_l(R0, R1);
1835   switch (op) {
1836     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1837     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1838     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1839     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1840     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1841     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1842     default : ShouldNotReachHere();
1843   }
1844 #else
1845   const FloatRegister arg1 = D1_tmp;
1846   const FloatRegister arg2 = D0_tos;
1847 
1848   switch (op) {
1849     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1850     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1851     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1852     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1853     case rem:
1854 #ifndef __ABI_HARD__
1855       __ pop_d(arg1);
1856       __ fmrrd(R0, R1, arg1);
1857       __ fmrrd(R2, R3, arg2);
1858       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1859       __ fmdrr(D0_tos, R0, R1);
1860 #else
1861       __ mov_double(D1, arg2);
1862       __ pop_d(D0);
1863       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1864 #endif // !__ABI_HARD__
1865       break;
1866     default : ShouldNotReachHere();
1867   }
1868 #endif // __SOFTFP__
1869 }
1870 
1871 
1872 void TemplateTable::ineg() {
1873   transition(itos, itos);
1874   __ neg_32(R0_tos, R0_tos);
1875 }
1876 
1877 
1878 void TemplateTable::lneg() {
1879   transition(ltos, ltos);
1880 #ifdef AARCH64
1881   __ neg(R0_tos, R0_tos);
1882 #else
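       // 64-bit negate: low = 0 - low (rsbs sets the borrow via the carry flag), high = 0 - high - borrow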
1883   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1884   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1885 #endif // AARCH64
1886 }
1887 
1888 
1889 void TemplateTable::fneg() {
1890   transition(ftos, ftos);
1891 #ifdef __SOFTFP__
1892   // Invert sign bit
1893   const int sign_mask = 0x80000000;
1894   __ eor(R0_tos, R0_tos, sign_mask);
1895 #else
1896   __ neg_float(S0_tos, S0_tos);
1897 #endif // __SOFTFP__
1898 }
1899 
1900 
1901 void TemplateTable::dneg() {
1902   transition(dtos, dtos);
1903 #ifdef __SOFTFP__
1904   // Invert sign bit in the high part of the double
1905   const int sign_mask_hi = 0x80000000;
1906   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1907 #else
1908   __ neg_double(D0_tos, D0_tos);
1909 #endif // __SOFTFP__
1910 }
1911 
1912 
1913 void TemplateTable::iinc() {
1914   transition(vtos, vtos);
1915   const Register Rconst = R2_tmp;
1916   const Register Rlocal_index = R1_tmp;
1917   const Register Rval = R0_tmp;
1918 
1919   __ ldrsb(Rconst, at_bcp(2));
1920   locals_index(Rlocal_index);
1921   Address local = load_iaddress(Rlocal_index, Rtemp);
1922   __ ldr_s32(Rval, local);
1923   __ add(Rval, Rval, Rconst);
1924   __ str_32(Rval, local);
1925 }
1926 
1927 
1928 void TemplateTable::wide_iinc() {
1929   transition(vtos, vtos);
1930   const Register Rconst = R2_tmp;
1931   const Register Rlocal_index = R1_tmp;
1932   const Register Rval = R0_tmp;
1933 
1934   // get constant in Rconst
1935   __ ldrsb(R2_tmp, at_bcp(4));
1936   __ ldrb(R3_tmp, at_bcp(5));
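       // combine into the signed 16-bit increment: (signed byte at bcp+4) << 8 | (unsigned byte at bcp+5)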
1937   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1938 
1939   locals_index_wide(Rlocal_index);
1940   Address local = load_iaddress(Rlocal_index, Rtemp);
1941   __ ldr_s32(Rval, local);
1942   __ add(Rval, Rval, Rconst);
1943   __ str_32(Rval, local);
1944 }
1945 
1946 
1947 void TemplateTable::convert() {
1948   // Checking
1949 #ifdef ASSERT
1950   { TosState tos_in  = ilgl;
1951     TosState tos_out = ilgl;
1952     switch (bytecode()) {
1953       case Bytecodes::_i2l: // fall through
1954       case Bytecodes::_i2f: // fall through
1955       case Bytecodes::_i2d: // fall through
1956       case Bytecodes::_i2b: // fall through
1957       case Bytecodes::_i2c: // fall through
1958       case Bytecodes::_i2s: tos_in = itos; break;
1959       case Bytecodes::_l2i: // fall through
1960       case Bytecodes::_l2f: // fall through
1961       case Bytecodes::_l2d: tos_in = ltos; break;
1962       case Bytecodes::_f2i: // fall through
1963       case Bytecodes::_f2l: // fall through
1964       case Bytecodes::_f2d: tos_in = ftos; break;
1965       case Bytecodes::_d2i: // fall through
1966       case Bytecodes::_d2l: // fall through
1967       case Bytecodes::_d2f: tos_in = dtos; break;
1968       default             : ShouldNotReachHere();
1969     }
1970     switch (bytecode()) {
1971       case Bytecodes::_l2i: // fall through
1972       case Bytecodes::_f2i: // fall through
1973       case Bytecodes::_d2i: // fall through
1974       case Bytecodes::_i2b: // fall through
1975       case Bytecodes::_i2c: // fall through
1976       case Bytecodes::_i2s: tos_out = itos; break;
1977       case Bytecodes::_i2l: // fall through
1978       case Bytecodes::_f2l: // fall through
1979       case Bytecodes::_d2l: tos_out = ltos; break;
1980       case Bytecodes::_i2f: // fall through
1981       case Bytecodes::_l2f: // fall through
1982       case Bytecodes::_d2f: tos_out = ftos; break;
1983       case Bytecodes::_i2d: // fall through
1984       case Bytecodes::_l2d: // fall through
1985       case Bytecodes::_f2d: tos_out = dtos; break;
1986       default             : ShouldNotReachHere();
1987     }
1988     transition(tos_in, tos_out);
1989   }
1990 #endif // ASSERT
1991 
1992   // Conversion
1993   switch (bytecode()) {
1994     case Bytecodes::_i2l:
1995 #ifdef AARCH64
1996       __ sign_extend(R0_tos, R0_tos, 32);
1997 #else
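       // sign-extend into the high word: fill R1_tos_hi with copies of the sign bit of R0_tos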
1998       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1999 #endif // AARCH64
2000       break;
2001 
2002     case Bytecodes::_i2f:
2003 #ifdef AARCH64
2004       __ scvtf_sw(S0_tos, R0_tos);
2005 #else
2006 #ifdef __SOFTFP__
2007       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
2008 #else
2009       __ fmsr(S0_tmp, R0_tos);
2010       __ fsitos(S0_tos, S0_tmp);
2011 #endif // __SOFTFP__
2012 #endif // AARCH64
2013       break;
2014 
2015     case Bytecodes::_i2d:
2016 #ifdef AARCH64
2017       __ scvtf_dw(D0_tos, R0_tos);
2018 #else
2019 #ifdef __SOFTFP__
2020       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
2021 #else
2022       __ fmsr(S0_tmp, R0_tos);
2023       __ fsitod(D0_tos, S0_tmp);
2024 #endif // __SOFTFP__
2025 #endif // AARCH64
2026       break;
2027 
2028     case Bytecodes::_i2b:
2029       __ sign_extend(R0_tos, R0_tos, 8);
2030       break;
2031 
2032     case Bytecodes::_i2c:
2033       __ zero_extend(R0_tos, R0_tos, 16);
2034       break;
2035 
2036     case Bytecodes::_i2s:
2037       __ sign_extend(R0_tos, R0_tos, 16);
2038       break;
2039 
2040     case Bytecodes::_l2i:
2041       /* nothing to do */
2042       break;
2043 
2044     case Bytecodes::_l2f:
2045 #ifdef AARCH64
2046       __ scvtf_sx(S0_tos, R0_tos);
2047 #else
2048       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
2049 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2050       __ fmsr(S0_tos, R0);
2051 #endif // !__SOFTFP__ && !__ABI_HARD__
2052 #endif // AARCH64
2053       break;
2054 
2055     case Bytecodes::_l2d:
2056 #ifdef AARCH64
2057       __ scvtf_dx(D0_tos, R0_tos);
2058 #else
2059       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
2060 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2061       __ fmdrr(D0_tos, R0, R1);
2062 #endif // !__SOFTFP__ && !__ABI_HARD__
2063 #endif // AARCH64
2064       break;
2065 
2066     case Bytecodes::_f2i:
2067 #ifdef AARCH64
2068       __ fcvtzs_ws(R0_tos, S0_tos);
2069 #else
2070 #ifndef __SOFTFP__
2071       __ ftosizs(S0_tos, S0_tos);
2072       __ fmrs(R0_tos, S0_tos);
2073 #else
2074       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
2075 #endif // !__SOFTFP__
2076 #endif // AARCH64
2077       break;
2078 
2079     case Bytecodes::_f2l:
2080 #ifdef AARCH64
2081       __ fcvtzs_xs(R0_tos, S0_tos);
2082 #else
2083 #ifndef __SOFTFP__
2084       __ fmrs(R0_tos, S0_tos);
2085 #endif // !__SOFTFP__
2086       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2087 #endif // AARCH64
2088       break;
2089 
2090     case Bytecodes::_f2d:
2091 #ifdef __SOFTFP__
2092       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2093 #else
2094       __ convert_f2d(D0_tos, S0_tos);
2095 #endif // __SOFTFP__
2096       break;
2097 
2098     case Bytecodes::_d2i:
2099 #ifdef AARCH64
2100       __ fcvtzs_wd(R0_tos, D0_tos);
2101 #else
2102 #ifndef __SOFTFP__
2103       __ ftosizd(Stemp, D0);
2104       __ fmrs(R0, Stemp);
2105 #else
2106       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2107 #endif // !__SOFTFP__
2108 #endif // AARCH64
2109       break;
2110 
2111     case Bytecodes::_d2l:
2112 #ifdef AARCH64
2113       __ fcvtzs_xd(R0_tos, D0_tos);
2114 #else
2115 #ifndef __SOFTFP__
2116       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2117 #endif // !__SOFTFP__
2118       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2119 #endif // AARCH64
2120       break;
2121 
2122     case Bytecodes::_d2f:
2123 #ifdef __SOFTFP__
2124       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2125 #else
2126       __ convert_d2f(S0_tos, D0_tos);
2127 #endif // __SOFTFP__
2128       break;
2129 
2130     default:
2131       ShouldNotReachHere();
2132   }
2133 }
2134 
2135 
2136 void TemplateTable::lcmp() {
2137   transition(ltos, itos);
2138 #ifdef AARCH64
2139   const Register arg1 = R1_tmp;
2140   const Register arg2 = R0_tos;
2141 
2142   __ pop_l(arg1);
2143 
2144   __ cmp(arg1, arg2);
2145   __ cset(R0_tos, gt);               // 1 if '>', else 0
2146   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2147 #else
2148   const Register arg1_lo = R2_tmp;
2149   const Register arg1_hi = R3_tmp;
2150   const Register arg2_lo = R0_tos_lo;
2151   const Register arg2_hi = R1_tos_hi;
2152   const Register res = R4_tmp;
2153 
2154   __ pop_l(arg1_lo, arg1_hi);
2155 
2156   // long compare arg1 with arg2
2157   // result is -1/0/+1 if '<'/'='/'>'
2158   Label done;
2159 
2160   __ mov (res, 0);
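       // signed compare of the high words decides, unless they are equal;
       // then an unsigned compare of the low words decides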
2161   __ cmp (arg1_hi, arg2_hi);
2162   __ mvn (res, 0, lt);
2163   __ mov (res, 1, gt);
2164   __ b(done, ne);
2165   __ cmp (arg1_lo, arg2_lo);
2166   __ mvn (res, 0, lo);
2167   __ mov (res, 1, hi);
2168   __ bind(done);
2169   __ mov (R0_tos, res);
2170 #endif // AARCH64
2171 }
2172 
2173 
2174 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2175   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2176 
2177 #ifdef AARCH64
2178   if (is_float) {
2179     transition(ftos, itos);
2180     __ pop_f(S1_tmp);
2181     __ fcmp_s(S1_tmp, S0_tos);
2182   } else {
2183     transition(dtos, itos);
2184     __ pop_d(D1_tmp);
2185     __ fcmp_d(D1_tmp, D0_tos);
2186   }
2187 
2188   if (unordered_result < 0) {
2189     __ cset(R0_tos, gt);               // 1 if '>', else 0
2190     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2191   } else {
2192     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2193     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2194   }
2195 
2196 #else
2197 
2198 #ifdef __SOFTFP__
2199 
2200   if (is_float) {
2201     transition(ftos, itos);
2202     const Register Rx = R0;
2203     const Register Ry = R1;
2204 
2205     __ mov(Ry, R0_tos);
2206     __ pop_i(Rx);
2207 
2208     if (unordered_result == 1) {
2209       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2210     } else {
2211       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2212     }
2213 
2214   } else {
2215 
2216     transition(dtos, itos);
2217     const Register Rx_lo = R0;
2218     const Register Rx_hi = R1;
2219     const Register Ry_lo = R2;
2220     const Register Ry_hi = R3;
2221 
2222     __ mov(Ry_lo, R0_tos_lo);
2223     __ mov(Ry_hi, R1_tos_hi);
2224     __ pop_l(Rx_lo, Rx_hi);
2225 
2226     if (unordered_result == 1) {
2227       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2228     } else {
2229       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2230     }
2231   }
2232 
2233 #else
2234 
2235   if (is_float) {
2236     transition(ftos, itos);
2237     __ pop_f(S1_tmp);
2238     __ fcmps(S1_tmp, S0_tos);
2239   } else {
2240     transition(dtos, itos);
2241     __ pop_d(D1_tmp);
2242     __ fcmpd(D1_tmp, D0_tos);
2243   }
2244 
2245   __ fmstat();
2246 
2247   // comparison result | flag N | flag Z | flag C | flag V
2248   // "<"               |   1    |   0    |   0    |   0
2249   // "=="              |   0    |   1    |   1    |   0
2250   // ">"               |   0    |   0    |   1    |   0
2251   // unordered         |   0    |   0    |   1    |   1
2252 
2253   if (unordered_result < 0) {
2254     __ mov(R0_tos, 1);           // result ==  1 if greater
2255     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2256   } else {
2257     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2258     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2259   }
2260   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2261 #endif // __SOFTFP__
2262 #endif // AARCH64
2263 }
2264 
2265 
2266 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2267 
2268   const Register Rdisp = R0_tmp;
2269   const Register Rbumped_taken_count = R5_tmp;
2270 
2271   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2272 
2273   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2274                              InvocationCounter::counter_offset();
2275   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2276                               InvocationCounter::counter_offset();
2277   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2278 
2279   // Load up R0 with the branch displacement
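       // (a signed 16-bit value for the normal form, a signed 32-bit value for the wide form;
       // the first byte is loaded with ldrsb so the result is properly sign-extended)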
2280   if (is_wide) {
2281     __ ldrsb(R0_tmp, at_bcp(1));
2282     __ ldrb(R1_tmp, at_bcp(2));
2283     __ ldrb(R2_tmp, at_bcp(3));
2284     __ ldrb(R3_tmp, at_bcp(4));
2285     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2286     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2287     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2288   } else {
2289     __ ldrsb(R0_tmp, at_bcp(1));
2290     __ ldrb(R1_tmp, at_bcp(2));
2291     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2292   }
2293 
2294   // Handle all the JSR stuff here, then exit.
2295   // It's much shorter and cleaner than intermingling with the
2296   // non-JSR normal-branch stuff occurring below.
2297   if (is_jsr) {
2298     // compute return address as bci in R1
2299     const Register Rret_addr = R1_tmp;
2300     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2301 
2302     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
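         // return bci = (Rbcp + 3 or 5, the length of jsr/jsr_w) - (ConstMethod* + codes_offset())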
2303     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2304     __ sub(Rret_addr, Rret_addr, Rtemp);
2305 
2306     // Load the next target bytecode into R3_bytecode and advance Rbcp
2307 #ifdef AARCH64
2308     __ add(Rbcp, Rbcp, Rdisp);
2309     __ ldrb(R3_bytecode, Address(Rbcp));
2310 #else
2311     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2312 #endif // AARCH64
2313 
2314     // Push return address
2315     __ push_i(Rret_addr);
2316     // jsr returns vtos
2317     __ dispatch_only_noverify(vtos);
2318     return;
2319   }
2320 
2321   // Normal (non-jsr) branch handling
2322 
2323   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2324 #ifdef AARCH64
2325   __ add(Rbcp, Rbcp, Rdisp);
2326   __ ldrb(R3_bytecode, Address(Rbcp));
2327 #else
2328   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2329 #endif // AARCH64
2330 
2331   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2332   Label backedge_counter_overflow;
2333   Label profile_method;
2334   Label dispatch;
2335 
2336   if (UseLoopCounter) {
2337     // increment backedge counter for backward branches
2338     // Rdisp (R0): target offset
2339 
2340     const Register Rcnt = R2_tmp;
2341     const Register Rcounters = R1_tmp;
2342 
2343     // count only if backward branch
2344 #ifdef AARCH64
2345     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2346 #else
2347     __ tst(Rdisp, Rdisp);
2348     __ b(dispatch, pl);
2349 #endif // AARCH64
2350 
2351     if (TieredCompilation) {
2352       Label no_mdo;
2353       int increment = InvocationCounter::count_increment;
2354       if (ProfileInterpreter) {
2355         // Are we profiling?
2356         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2357         __ cbz(Rtemp, no_mdo);
2358         // Increment the MDO backedge counter
2359         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2360                                                   in_bytes(InvocationCounter::counter_offset()));
2361         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2362         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2363                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2364         __ b(dispatch);
2365       }
2366       __ bind(no_mdo);
2367       // Increment backedge counter in MethodCounters*
2368       // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on AArch64
2369       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2370                              Rdisp, R3_bytecode,
2371                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2372       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2373       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2374                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2375     } else {
2376       // Increment backedge counter in MethodCounters*
2377       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2378                              Rdisp, R3_bytecode,
2379                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2380       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2381       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2382       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2383 
2384       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2385 #ifdef AARCH64
2386       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // mask out the status bits, keep the count
2387 #else
2388       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // mask out the status bits, keep the count
2389 #endif // AARCH64
2390       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2391 
2392       if (ProfileInterpreter) {
2393         // Test to see if we should create a method data oop
2394         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2395         __ ldr_s32(Rtemp, profile_limit);
2396         __ cmp_32(Rcnt, Rtemp);
2397         __ b(dispatch, lt);
2398 
2399         // if no method data exists, go to profile method
2400         __ test_method_data_pointer(R4_tmp, profile_method);
2401 
2402         if (UseOnStackReplacement) {
2403           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2404           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2405           __ ldr_s32(Rtemp, backward_branch_limit);
2406           __ cmp(Rbumped_taken_count, Rtemp);
2407           __ b(dispatch, lo);
2408 
2409           // When ProfileInterpreter is on, the backedge_count comes from the
2410           // MethodData*, whose value does not get reset on the call to
2411           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2412           // routine while the method is being compiled, add a second test to make
2413           // sure the overflow function is called only once every overflow_frequency.
2414           const int overflow_frequency = 1024;
2415 
2416 #ifdef AARCH64
2417           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2418 #else
2419           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2420           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2421           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2422 #endif // AARCH64
2423 
2424           __ b(backedge_counter_overflow, eq);
2425         }
2426       } else {
2427         if (UseOnStackReplacement) {
2428           // check for overflow against Rcnt, which is the sum of the counters
2429           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2430           __ ldr_s32(Rtemp, backward_branch_limit);
2431           __ cmp_32(Rcnt, Rtemp);
2432           __ b(backedge_counter_overflow, hs);
2433 
2434         }
2435       }
2436     }
2437     __ bind(dispatch);
2438   }
2439 
2440   if (!UseOnStackReplacement) {
2441     __ bind(backedge_counter_overflow);
2442   }
2443 
2444   // continue with the bytecode @ target
2445   __ dispatch_only(vtos);
2446 
2447   if (UseLoopCounter) {
2448     if (ProfileInterpreter) {
2449       // Out-of-line code to allocate method data oop.
2450       __ bind(profile_method);
2451 
2452       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2453       __ set_method_data_pointer_for_bcp();
2454       // reload next bytecode
2455       __ ldrb(R3_bytecode, Address(Rbcp));
2456       __ b(dispatch);
2457     }
2458 
2459     if (UseOnStackReplacement) {
2460       // invocation counter overflow
2461       __ bind(backedge_counter_overflow);
2462 
2463       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2464       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2465 
2466       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2467       const Register Rnmethod = R0;
2468 
2469       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2470 
2471       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2472 
2473       // nmethod may have been invalidated (VM may block upon call_VM return)
2474       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2475       __ cmp(R1_tmp, nmethod::in_use);
2476       __ b(dispatch, ne);
2477 
2478       // We have the address of an on-stack replacement routine in Rnmethod.
2479       // We need to prepare to execute the OSR method. First we must
2480       // migrate the locals and monitors off of the stack.
2481 
2482       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2483 
2484       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2485 
2486       // R0 is OSR buffer
2487 
2488       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2489       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2490 
2491 #ifdef AARCH64
2492       __ ldp(FP, LR, Address(FP));
2493       __ mov(SP, Rtemp);
2494 #else
2495       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2496       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2497 #endif // AARCH64
2498 
2499       __ jump(R1_tmp);
2500     }
2501   }
2502 }
2503 
2504 
2505 void TemplateTable::if_0cmp(Condition cc) {
2506   transition(itos, vtos);
2507   // assume branch is more often taken than not (loops use backward branches)
2508   Label not_taken;
2509 #ifdef AARCH64
2510   if (cc == equal) {
2511     __ cbnz_w(R0_tos, not_taken);
2512   } else if (cc == not_equal) {
2513     __ cbz_w(R0_tos, not_taken);
2514   } else {
2515     __ cmp_32(R0_tos, 0);
2516     __ b(not_taken, convNegCond(cc));
2517   }
2518 #else
2519   __ cmp_32(R0_tos, 0);
2520   __ b(not_taken, convNegCond(cc));
2521 #endif // AARCH64
2522   branch(false, false);
2523   __ bind(not_taken);
2524   __ profile_not_taken_branch(R0_tmp);
2525 }
2526 
2527 
2528 void TemplateTable::if_icmp(Condition cc) {
2529   transition(itos, vtos);
2530   // assume branch is more often taken than not (loops use backward branches)
2531   Label not_taken;
2532   __ pop_i(R1_tmp);
2533   __ cmp_32(R1_tmp, R0_tos);
2534   __ b(not_taken, convNegCond(cc));
2535   branch(false, false);
2536   __ bind(not_taken);
2537   __ profile_not_taken_branch(R0_tmp);
2538 }
2539 
2540 
2541 void TemplateTable::if_nullcmp(Condition cc) {
2542   transition(atos, vtos);
2543   assert(cc == equal || cc == not_equal, "invalid condition");
2544 
2545   // assume branch is more often taken than not (loops use backward branches)
2546   Label not_taken;
2547   if (cc == equal) {
2548     __ cbnz(R0_tos, not_taken);
2549   } else {
2550     __ cbz(R0_tos, not_taken);
2551   }
2552   branch(false, false);
2553   __ bind(not_taken);
2554   __ profile_not_taken_branch(R0_tmp);
2555 }
2556 
2557 
2558 void TemplateTable::if_acmp(Condition cc) {
2559   transition(atos, vtos);
2560   // assume branch is more often taken than not (loops use backward branches)
2561   Label not_taken;
2562   __ pop_ptr(R1_tmp);
2563   __ cmp(R1_tmp, R0_tos);
2564   __ b(not_taken, convNegCond(cc));
2565   branch(false, false);
2566   __ bind(not_taken);
2567   __ profile_not_taken_branch(R0_tmp);
2568 }
2569 
2570 
2571 void TemplateTable::ret() {
2572   transition(vtos, vtos);
2573   const Register Rlocal_index = R1_tmp;
2574   const Register Rret_bci = Rtmp_save0; // R4/R19
2575 
2576   locals_index(Rlocal_index);
2577   Address local = load_iaddress(Rlocal_index, Rtemp);
2578   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2579   __ profile_ret(Rtmp_save1, Rret_bci);
2580   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2581   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2582   __ add(Rbcp, Rtemp, Rret_bci);
2583   __ dispatch_next(vtos);
2584 }
2585 
2586 
2587 void TemplateTable::wide_ret() {
2588   transition(vtos, vtos);
2589   const Register Rlocal_index = R1_tmp;
2590   const Register Rret_bci = Rtmp_save0; // R4/R19
2591 
2592   locals_index_wide(Rlocal_index);
2593   Address local = load_iaddress(Rlocal_index, Rtemp);
2594   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2595   __ profile_ret(Rtmp_save1, Rret_bci);
2596   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2597   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2598   __ add(Rbcp, Rtemp, Rret_bci);
2599   __ dispatch_next(vtos);
2600 }
2601 
2602 
2603 void TemplateTable::tableswitch() {
2604   transition(itos, vtos);
2605 
2606   const Register Rindex  = R0_tos;
2607 #ifndef AARCH64
2608   const Register Rtemp2  = R1_tmp;
2609 #endif // !AARCH64
2610   const Register Rabcp   = R2_tmp;  // aligned bcp
2611   const Register Rlow    = R3_tmp;
2612   const Register Rhigh   = R4_tmp;
2613   const Register Roffset = R5_tmp;
2614 
2615   // align bcp
2616   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2617   __ align_reg(Rabcp, Rtemp, BytesPerInt);
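       // Rabcp now points at the 'low' word (the default offset is at Rabcp - BytesPerInt);
       // the load below advances it past 'high' to the start of the jump-offset table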
2618 
2619   // load lo & hi
2620 #ifdef AARCH64
2621   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2622 #else
2623   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2624 #endif // AARCH64
2625   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2626   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2627 
2628   // compare index with high bound
2629   __ cmp_32(Rhigh, Rindex);
2630 
2631 #ifdef AARCH64
2632   Label default_case, do_dispatch;
2633   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2634   __ b(default_case, lt);
2635 
2636   __ sub_w(Rindex, Rindex, Rlow);
2637   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2638   if(ProfileInterpreter) {
2639     __ sxtw(Rindex, Rindex);
2640     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2641   }
2642   __ b(do_dispatch);
2643 
2644   __ bind(default_case);
2645   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2646   if(ProfileInterpreter) {
2647     __ profile_switch_default(R0_tmp);
2648   }
2649 
2650   __ bind(do_dispatch);
2651 #else
2652 
2653   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2654   __ subs(Rindex, Rindex, Rlow, ge);
2655 
2656   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2657   // ("ge" status accumulated from cmp and subs instructions) then load
2658   // offset from table, otherwise load offset for default case
2659 
2660   if(ProfileInterpreter) {
2661     Label default_case, continue_execution;
2662 
2663     __ b(default_case, lt);
2664     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2665     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2666     __ b(continue_execution);
2667 
2668     __ bind(default_case);
2669     __ profile_switch_default(R0_tmp);
2670     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2671 
2672     __ bind(continue_execution);
2673   } else {
2674     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2675     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2676   }
2677 #endif // AARCH64
2678 
2679   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2680 
2681   // load the next bytecode to R3_bytecode and advance Rbcp
2682 #ifdef AARCH64
2683   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2684   __ ldrb(R3_bytecode, Address(Rbcp));
2685 #else
2686   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2687 #endif // AARCH64
2688   __ dispatch_only(vtos);
2689 
2690 }
2691 
2692 
2693 void TemplateTable::lookupswitch() {
2694   transition(itos, itos);
2695   __ stop("lookupswitch bytecode should have been rewritten");
2696 }
2697 
2698 
2699 void TemplateTable::fast_linearswitch() {
2700   transition(itos, vtos);
2701   Label loop, found, default_case, continue_execution;
2702 
2703   const Register Rkey     = R0_tos;
2704   const Register Rabcp    = R2_tmp;  // aligned bcp
2705   const Register Rdefault = R3_tmp;
2706   const Register Rcount   = R4_tmp;
2707   const Register Roffset  = R5_tmp;
2708 
2709   // bswap Rkey, so we can avoid bswapping the table entries
2710   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2711 
2712   // align bcp
2713   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2714   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2715 
2716   // load default & counter
2717 #ifdef AARCH64
2718   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2719 #else
2720   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2721 #endif // AARCH64
2722   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2723 
2724 #ifdef AARCH64
2725   __ cbz_w(Rcount, default_case);
2726 #else
2727   __ cmp_32(Rcount, 0);
2728   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2729   __ b(default_case, eq);
2730 #endif // AARCH64
2731 
2732   // table search
2733   __ bind(loop);
2734 #ifdef AARCH64
2735   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2736 #endif // AARCH64
2737   __ cmp_32(Rtemp, Rkey);
2738   __ b(found, eq);
2739   __ subs(Rcount, Rcount, 1);
2740 #ifndef AARCH64
2741   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2742 #endif // !AARCH64
2743   __ b(loop, ne);
2744 
2745   // default case
2746   __ bind(default_case);
2747   __ profile_switch_default(R0_tmp);
2748   __ mov(Roffset, Rdefault);
2749   __ b(continue_execution);
2750 
2751   // entry found -> get offset
2752   __ bind(found);
2753   // Rabcp is already incremented and points to the next entry
2754   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2755   if (ProfileInterpreter) {
2756     // Calculate index of the selected case.
2757     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2758 
2759     // align bcp
2760     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2761     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2762 
2763     // load number of cases
2764     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2765     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2766 
2767     // Selected index = <number of cases> - <current loop count>
2768     __ sub(R1_tmp, R2_tmp, Rcount);
2769     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2770   }
2771 
2772   // continue execution
2773   __ bind(continue_execution);
2774   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2775 
2776   // load the next bytecode to R3_bytecode and advance Rbcp
2777 #ifdef AARCH64
2778   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2779   __ ldrb(R3_bytecode, Address(Rbcp));
2780 #else
2781   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2782 #endif // AARCH64
2783   __ dispatch_only(vtos);
2784 }
2785 
2786 
2787 void TemplateTable::fast_binaryswitch() {
2788   transition(itos, vtos);
2789   // Implementation using the following core algorithm:
2790   //
2791   // int binary_search(int key, LookupswitchPair* array, int n) {
2792   //   // Binary search according to "Methodik des Programmierens" by
2793   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2794   //   int i = 0;
2795   //   int j = n;
2796   //   while (i+1 < j) {
2797   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2798   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2799   //     // where a stands for the array and assuming that the (nonexistent)
2800   //     // element a[n] is infinitely big.
2801   //     int h = (i + j) >> 1;
2802   //     // i < h < j
2803   //     if (key < array[h].fast_match()) {
2804   //       j = h;
2805   //     } else {
2806   //       i = h;
2807   //     }
2808   //   }
2809   //   // R: a[i] <= key < a[i+1] or Q
2810   //   // (i.e., if key is within array, i is the correct index)
2811   //   return i;
2812   // }
2813 
2814   // register allocation
2815   const Register key    = R0_tos;                // already set (tosca)
2816   const Register array  = R1_tmp;
2817   const Register i      = R2_tmp;
2818   const Register j      = R3_tmp;
2819   const Register h      = R4_tmp;
2820   const Register val    = R5_tmp;
2821   const Register temp1  = Rtemp;
2822   const Register temp2  = LR_tmp;
2823   const Register offset = R3_tmp;
2824 
2825   // set 'array' = aligned bcp + 2 ints
2826   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2827   __ align_reg(array, temp1, BytesPerInt);
2828 
2829   // initialize i & j
2830   __ mov(i, 0);                                  // i = 0;
2831   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2832   // Convert j into native byte ordering
2833   __ byteswap_u32(j, temp1, temp2);
2834 
2835   // and start
2836   Label entry;
2837   __ b(entry);
2838 
2839   // binary search loop
2840   { Label loop;
2841     __ bind(loop);
2842     // int h = (i + j) >> 1;
2843     __ add(h, i, j);                             // h = i + j;
2844     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2845     // if (key < array[h].fast_match()) {
2846     //   j = h;
2847     // } else {
2848     //   i = h;
2849     // }
2850 #ifdef AARCH64
2851     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2852     __ ldr_s32(val, Address(temp1));
2853 #else
2854     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2855 #endif // AARCH64
2856     // Convert array[h].match to native byte-ordering before compare
2857     __ byteswap_u32(val, temp1, temp2);
2858     __ cmp_32(key, val);
2859     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2860     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2861     // while (i+1 < j)
2862     __ bind(entry);
2863     __ add(temp1, i, 1);                             // i+1
2864     __ cmp(temp1, j);                                // i+1 < j
2865     __ b(loop, lt);
2866   }
2867 
2868   // end of binary search, result index is i (must check again!)
2869   Label default_case;
2870   // Convert array[i].match to native byte-ordering before compare
2871 #ifdef AARCH64
2872   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2873   __ ldr_s32(val, Address(temp1));
2874 #else
2875   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2876 #endif // AARCH64
2877   __ byteswap_u32(val, temp1, temp2);
2878   __ cmp_32(key, val);
2879   __ b(default_case, ne);
2880 
2881   // entry found
2882   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2883   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2884   __ profile_switch_case(R0, i, R1, i);
2885   __ byteswap_u32(offset, temp1, temp2);
2886 #ifdef AARCH64
2887   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2888   __ ldrb(R3_bytecode, Address(Rbcp));
2889 #else
2890   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2891 #endif // AARCH64
2892   __ dispatch_only(vtos);
2893 
2894   // default case
2895   __ bind(default_case);
2896   __ profile_switch_default(R0);
2897   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2898   __ byteswap_u32(offset, temp1, temp2);
2899 #ifdef AARCH64
2900   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2901   __ ldrb(R3_bytecode, Address(Rbcp));
2902 #else
2903   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2904 #endif // AARCH64
2905   __ dispatch_only(vtos);
2906 }
2907 
2908 
2909 void TemplateTable::_return(TosState state) {
2910   transition(state, state);
2911   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2912 
2913   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2914     Label skip_register_finalizer;
2915     assert(state == vtos, "only valid state");
2916     __ ldr(R1, aaddress(0));
2917     __ load_klass(Rtemp, R1);
2918     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2919     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2920 
2921     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2922 
2923     __ bind(skip_register_finalizer);
2924   }
2925 
2926   // Narrow result if state is itos but result type is smaller.
2927   // Need to narrow in the return bytecode rather than in generate_return_entry
2928   // since compiled code callers expect the result to already be narrowed.
2929   if (state == itos) {
2930     __ narrow(R0_tos);
2931   }
2932   __ remove_activation(state, LR);
2933 
2934   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2935 
2936 #ifndef AARCH64
2937   // According to interpreter calling conventions, result is returned in R0/R1,
2938   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2939   // This conversion should be done after remove_activation, as it uses
2940   // push(state) & pop(state) to preserve return value.
2941   __ convert_tos_to_retval(state);
2942 #endif // !AARCH64
2943 
2944   __ ret();
2945 
2946   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2947   __ nop();
2948 }
2949 
2950 
2951 // ----------------------------------------------------------------------------
2952 // Volatile variables demand their effects be made known to all CPUs in
2953 // order.  Store buffers on most chips allow reads & writes to reorder; the
2954 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2955 // memory barrier (i.e., it's not sufficient that the interpreter does not
2956 // reorder volatile references, the hardware also must not reorder them).
2957 //
2958 // According to the new Java Memory Model (JMM):
2959 // (1) All volatiles are serialized with respect to each other.
2960 // ALSO reads & writes act as acquire & release, so:
2961 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2962 // the read float up to before the read.  It's OK for non-volatile memory refs
2963 // that happen before the volatile read to float down below it.
2964 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2965 // that happen BEFORE the write float down to after the write.  It's OK for
2966 // non-volatile memory refs that happen after the volatile write to float up
2967 // before it.
2968 //
2969 // We only put in barriers around volatile refs (they are expensive), not
2970 // _between_ memory refs (that would require us to track the flavor of the
2971 // previous memory refs).  Requirements (2) and (3) require some barriers
2972 // before volatile stores and after volatile loads.  These nearly cover
2973 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2974 // case is placed after volatile-stores although it could just as well go
2975 // before volatile-loads.
2976 // TODO-AARCH64: consider removing extra unused parameters
2977 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2978                                      Register tmp,
2979                                      bool preserve_flags,
2980                                      Register load_tgt) {
2981 #ifdef AARCH64
2982   __ membar(order_constraint);
2983 #else
2984   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2985 #endif
2986 }
2987 
2988 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2989 void TemplateTable::resolve_cache_and_index(int byte_no,
2990                                             Register Rcache,
2991                                             Register Rindex,
2992                                             size_t index_size) {
2993   assert_different_registers(Rcache, Rindex, Rtemp);
2994 
2995   Label resolved;
2996   Bytecodes::Code code = bytecode();
2997   switch (code) {
2998   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2999   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
3000   }
3001 
3002   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
3003   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
3004   __ cmp(Rtemp, code);  // have we resolved this bytecode?
3005   __ b(resolved, eq);
3006 
3007   // resolve first time through
3008   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
3009   __ mov(R1, code);
3010   __ call_VM(noreg, entry, R1);
3011   // Update registers with resolved info
3012   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
3013   __ bind(resolved);
3014 }
3015 
3016 
3017 // The Rcache and Rindex registers must be set before the call
3018 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
3019                                               Register Rindex,
3020                                               Register Roffset,
3021                                               Register Rflags,
3022                                               Register Robj,
3023                                               bool is_static = false) {
3024 
3025   assert_different_registers(Rcache, Rindex, Rtemp);
3026   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
3027 
3028   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3029 
3030   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3031 
3032   // Field offset
3033   __ ldr(Roffset, Address(Rtemp,
3034            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
3035 
3036   // Flags
3037   __ ldr_u32(Rflags, Address(Rtemp,
3038            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3039 
3040   if (is_static) {
3041     __ ldr(Robj, Address(Rtemp,
3042              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
3043     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3044     __ ldr(Robj, Address(Robj, mirror_offset));
3045     __ resolve_oop_handle(Robj);
3046   }
3047 }
3048 
3049 
3050 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
3051 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
3052                                                Register method,
3053                                                Register itable_index,
3054                                                Register flags,
3055                                                bool is_invokevirtual,
3056                                                bool is_invokevfinal/*unused*/,
3057                                                bool is_invokedynamic) {
3058   // setup registers
3059   const Register cache = R2_tmp;
3060   const Register index = R3_tmp;
3061   const Register temp_reg = Rtemp;
3062   assert_different_registers(cache, index, temp_reg);
3063   assert_different_registers(method, itable_index, temp_reg);
3064 
3065   // determine constant pool cache field offsets
3066   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
3067   const int method_offset = in_bytes(
3068     ConstantPoolCache::base_offset() +
3069       ((byte_no == f2_byte)
3070        ? ConstantPoolCacheEntry::f2_offset()
3071        : ConstantPoolCacheEntry::f1_offset()
3072       )
3073     );
3074   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
3075                                     ConstantPoolCacheEntry::flags_offset());
3076   // access constant pool cache fields
3077   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
3078                                     ConstantPoolCacheEntry::f2_offset());
3079 
3080   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3081   resolve_cache_and_index(byte_no, cache, index, index_size);
3082     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3083     __ ldr(method, Address(temp_reg, method_offset));
3084 
3085   if (itable_index != noreg) {
3086     __ ldr(itable_index, Address(temp_reg, index_offset));
3087   }
3088   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3089 }
3090 
3091 
3092 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
3093 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3094 // except cache and index registers which are preserved.
3095 void TemplateTable::jvmti_post_field_access(Register Rcache,
3096                                             Register Rindex,
3097                                             bool is_static,
3098                                             bool has_tos) {
3099   assert_different_registers(Rcache, Rindex, Rtemp);
3100 
3101   if (__ can_post_field_access()) {
3102     // Check to see if a field access watch has been set before we take
3103     // the time to call into the VM.
3104 
3105     Label Lcontinue;
3106 
3107     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3108     __ cbz(Rtemp, Lcontinue);
3109 
3110     // cache entry pointer
3111     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3112     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3113     if (is_static) {
3114       __ mov(R1, 0);        // NULL object reference
3115     } else {
3116       __ pop(atos);         // Get the object
3117       __ mov(R1, R0_tos);
3118       __ verify_oop(R1);
3119       __ push(atos);        // Restore stack state
3120     }
3121     // R1: object pointer or NULL
3122     // R2: cache entry pointer
3123     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3124                R1, R2);
3125     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3126 
3127     __ bind(Lcontinue);
3128   }
3129 }
3130 
3131 
3132 void TemplateTable::pop_and_check_object(Register r) {
3133   __ pop_ptr(r);
3134   __ null_check(r, Rtemp);  // for field access must check obj.
3135   __ verify_oop(r);
3136 }
3137 
3138 
3139 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3140   transition(vtos, vtos);
3141 
3142   const Register Roffset  = R2_tmp;
3143   const Register Robj     = R3_tmp;
3144   const Register Rcache   = R4_tmp;
3145   const Register Rflagsav = Rtmp_save0;  // R4/R19
3146   const Register Rindex   = R5_tmp;
3147   const Register Rflags   = R5_tmp;
3148 
3149   const bool gen_volatile_check = os::is_MP();
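       // volatile field accesses need memory barriers only on multiprocessor systems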
3150 
3151   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3152   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3153   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3154 
3155   if (gen_volatile_check) {
3156     __ mov(Rflagsav, Rflags);
3157   }
3158 
3159   if (!is_static) pop_and_check_object(Robj);
3160 
3161   Label Done, Lint, Ltable, shouldNotReachHere;
3162   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3163 
3164   // compute type
3165   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3166   // Make sure we don't need to mask flags after the above shift
3167   ConstantPoolCacheEntry::verify_tos_state_shift();
3168 
3169   // There are actually two versions of implementation of getfield/getstatic:
3170   //
3171   // 32-bit ARM:
3172   // 1) Table switch using add(PC,...) instruction (fast_version)
3173   // 2) Table switch using ldr(PC,...) instruction
3174   //
3175   // AArch64:
3176   // 1) Table switch using adr/add/br instructions (fast_version)
3177   // 2) Table switch using adr/ldr/br instructions
3178   //
3179   // The first version requires a fixed-size code block for each case and
3180   // cannot be used in RewriteBytecodes or VerifyOops
3181   // modes.
3182 
3183   // Size of fixed size code block for fast_version
3184   const int log_max_block_size = 2;
3185   const int max_block_size = 1 << log_max_block_size;
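       // each tos-state case below is emitted in a FixedSizeCodeBlock padded to max_block_size
       // instructions, so the table switch can reach it at base + (tos state << (log_max_block_size + LogInstructionSize))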
3186 
3187   // Decide if fast version is enabled
3188   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3189 
3190   // On 32-bit ARM the atos and itos cases can be merged only for the fast version, because
3191   // atos requires additional processing in the slow version.
3192   // On AArch64 atos and itos cannot be merged.
3193   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3194 
3195   assert(number_of_states == 10, "number of tos states should be equal to 10");
3196 
3197   __ cmp(Rflags, itos);
3198 #ifdef AARCH64
3199   __ b(Lint, eq);
3200 
3201   if(fast_version) {
3202     __ adr(Rtemp, Lbtos);
3203     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3204     __ br(Rtemp);
3205   } else {
3206     __ adr(Rtemp, Ltable);
3207     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3208     __ br(Rtemp);
3209   }
3210 #else
3211   if(atos_merged_with_itos) {
3212     __ cmp(Rflags, atos, ne);
3213   }
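       // after the conditional compare, 'eq' is set iff the tos state is itos (or atos when merged);
       // the table dispatch below is taken only for the remaining states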
3214 
3215   // table switch by type
3216   if(fast_version) {
3217     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3218   } else {
3219     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3220   }
3221 
3222   // jump to itos/atos case
3223   __ b(Lint);
3224 #endif // AARCH64
3225 
3226   // table with addresses for slow version
3227   if (fast_version) {
3228     // nothing to do
3229   } else  {
3230     AARCH64_ONLY(__ align(wordSize));
3231     __ bind(Ltable);
3232     __ emit_address(Lbtos);
3233     __ emit_address(Lztos);
3234     __ emit_address(Lctos);
3235     __ emit_address(Lstos);
3236     __ emit_address(Litos);
3237     __ emit_address(Lltos);
3238     __ emit_address(Lftos);
3239     __ emit_address(Ldtos);
3240     __ emit_address(Latos);
3241   }
3242 
3243 #ifdef ASSERT
3244   int seq = 0;
3245 #endif
3246   // btos
3247   {
3248     assert(btos == seq++, "btos has unexpected value");
3249     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3250     __ bind(Lbtos);
3251     __ ldrsb(R0_tos, Address(Robj, Roffset));
3252     __ push(btos);
3253     // Rewrite bytecode to be faster
3254     if (!is_static && rc == may_rewrite) {
3255       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3256     }
3257     __ b(Done);
3258   }
3259 
3260   // ztos (same as btos for getfield)
3261   {
3262     assert(ztos == seq++, "ztos has unexpected value");
3263     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3264     __ bind(Lztos);
3265     __ ldrsb(R0_tos, Address(Robj, Roffset));
3266     __ push(ztos);
3267     // Rewrite bytecode to be faster (use btos fast getfield)
3268     if (!is_static && rc == may_rewrite) {
3269       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3270     }
3271     __ b(Done);
3272   }
3273 
3274   // ctos
3275   {
3276     assert(ctos == seq++, "ctos has unexpected value");
3277     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3278     __ bind(Lctos);
3279     __ ldrh(R0_tos, Address(Robj, Roffset));
3280     __ push(ctos);
3281     if (!is_static && rc == may_rewrite) {
3282       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3283     }
3284     __ b(Done);
3285   }
3286 
3287   // stos
3288   {
3289     assert(stos == seq++, "stos has unexpected value");
3290     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3291     __ bind(Lstos);
3292     __ ldrsh(R0_tos, Address(Robj, Roffset));
3293     __ push(stos);
3294     if (!is_static && rc == may_rewrite) {
3295       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3296     }
3297     __ b(Done);
3298   }
3299 
3300   // itos
3301   {
3302     assert(itos == seq++, "itos has unexpected value");
3303     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3304     __ bind(Litos);
3305     __ b(shouldNotReachHere);
3306   }
3307 
3308   // ltos
3309   {
3310     assert(ltos == seq++, "ltos has unexpected value");
3311     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3312     __ bind(Lltos);
3313 #ifdef AARCH64
3314     __ ldr(R0_tos, Address(Robj, Roffset));
3315 #else
3316     __ add(Roffset, Robj, Roffset);
3317     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3318 #endif // AARCH64
3319     __ push(ltos);
3320     if (!is_static && rc == may_rewrite) {
3321       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3322     }
3323     __ b(Done);
3324   }
3325 
3326   // ftos
3327   {
3328     assert(ftos == seq++, "ftos has unexpected value");
3329     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3330     __ bind(Lftos);
3331     // floats and ints are placed on the stack in the same way, so
3332     // we can use push(itos) to transfer value without using VFP
3333     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3334     __ push(itos);
3335     if (!is_static && rc == may_rewrite) {
3336       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3337     }
3338     __ b(Done);
3339   }
3340 
3341   // dtos
3342   {
3343     assert(dtos == seq++, "dtos has unexpected value");
3344     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3345     __ bind(Ldtos);
3346     // doubles and longs are placed on stack in the same way, so
3347     // we can use push(ltos) to transfer value without using VFP
3348 #ifdef AARCH64
3349     __ ldr(R0_tos, Address(Robj, Roffset));
3350 #else
3351     __ add(Rtemp, Robj, Roffset);
3352     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3353 #endif // AARCH64
3354     __ push(ltos);
3355     if (!is_static && rc == may_rewrite) {
3356       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3357     }
3358     __ b(Done);
3359   }
3360 
3361   // atos
3362   {
3363     assert(atos == seq++, "atos has unexpected value");
3364 
3365     // atos case for AArch64 and slow version on 32-bit ARM
3366     if(!atos_merged_with_itos) {
3367       __ bind(Latos);
3368       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3369       __ push(atos);
3370       // Rewrite bytecode to be faster
3371       if (!is_static && rc == may_rewrite) {
3372         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3373       }
3374       __ b(Done);
3375     }
3376   }
3377 
3378   assert(vtos == seq++, "vtos has unexpected value");
3379 
3380   __ bind(shouldNotReachHere);
3381   __ should_not_reach_here();
3382 
3383   // itos and atos cases are frequent so it makes sense to move them out of table switch
3384   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3385 
3386   __ bind(Lint);
3387   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3388   __ push(itos);
3389   // Rewrite bytecode to be faster
3390   if (!is_static && rc == may_rewrite) {
3391     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3392   }
3393 
3394   __ bind(Done);
3395 
3396   if (gen_volatile_check) {
3397     // Check for volatile field
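         // A volatile read must be followed by LoadLoad|LoadStore (acquire) so that
         // later loads and stores cannot float above it (cf. the JSR-133 cookbook).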
3398     Label notVolatile;
3399     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3400 
3401     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3402 
3403     __ bind(notVolatile);
3404   }
3405 
3406 }
3407 
3408 void TemplateTable::getfield(int byte_no) {
3409   getfield_or_static(byte_no, false);
3410 }
3411 
3412 void TemplateTable::nofast_getfield(int byte_no) {
3413   getfield_or_static(byte_no, false, may_not_rewrite);
3414 }
3415 
3416 void TemplateTable::getstatic(int byte_no) {
3417   getfield_or_static(byte_no, true);
3418 }
3419 
3420 
3421 // The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
3422 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3423 // except cache and index registers which are preserved.
3424 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3425   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3426   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3427 
3428   if (__ can_post_field_modification()) {
3429     // Check to see if a field modification watch has been set before we take
3430     // the time to call into the VM.
3431     Label Lcontinue;
3432 
3433     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3434     __ cbz(Rtemp, Lcontinue);
3435 
3436     if (is_static) {
3437       // Life is simple.  Null out the object pointer.
3438       __ mov(R1, 0);
3439     } else {
3440       // Life is harder. The stack holds the value on top, followed by the object.
3441       // We don't know the size of the value, though; it could be one or two words
3442       // depending on its type. As a result, we must find the type to determine where
3443       // the object is.
3444 
3445       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3446       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3447 
3448       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3449       // Make sure we don't need to mask Rtemp after the above shift
3450       ConstantPoolCacheEntry::verify_tos_state_shift();
3451 
3452       __ cmp(Rtemp, ltos);
3453       __ cond_cmp(Rtemp, dtos, ne);
3454 #ifdef AARCH64
3455       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3456       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3457       __ mov(R1, Rtemp, eq);
3458       __ ldr(R1, Address(Rstack_top, R1));
3459 #else
3460       // two word value (ltos/dtos)
3461       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3462 
3463       // one word value (not ltos, dtos)
3464       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3465 #endif // AARCH64
3466     }
3467 
3468     // cache entry pointer
3469     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3470     __ add(R2, R2, in_bytes(cp_base_offset));
3471 
3472     // object (tos)
3473     __ mov(R3, Rstack_top);
3474 
3475     // R1: object pointer set up above (NULL if static)
3476     // R2: cache entry pointer
3477     // R3: value object on the stack
3478     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3479                R1, R2, R3);
3480     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3481 
3482     __ bind(Lcontinue);
3483   }
3484 }
3485 
3486 
3487 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3488   transition(vtos, vtos);
3489 
3490   const Register Roffset  = R2_tmp;
3491   const Register Robj     = R3_tmp;
3492   const Register Rcache   = R4_tmp;
3493   const Register Rflagsav = Rtmp_save0;  // R4/R19
3494   const Register Rindex   = R5_tmp;
3495   const Register Rflags   = R5_tmp;
3496 
3497   const bool gen_volatile_check = os::is_MP();
3498 
3499   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3500   jvmti_post_field_mod(Rcache, Rindex, is_static);
3501   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3502 
3503   if (gen_volatile_check) {
3504     // Check for volatile field
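         // A volatile write must be preceded by StoreStore|LoadStore (release) so that
         // earlier loads and stores cannot be reordered after it (cf. the JSR-133 cookbook).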
3505     Label notVolatile;
3506     __ mov(Rflagsav, Rflags);
3507     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3508 
3509     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3510 
3511     __ bind(notVolatile);
3512   }
3513 
3514   Label Done, Lint, shouldNotReachHere;
3515   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3516 
3517   // compute type
3518   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3519   // Make sure we don't need to mask flags after the above shift
3520   ConstantPoolCacheEntry::verify_tos_state_shift();
3521 
3522   // There are actually two versions of implementation of putfield/putstatic:
3523   //
3524   // 32-bit ARM:
3525   // 1) Table switch using add(PC,...) instruction (fast_version)
3526   // 2) Table switch using ldr(PC,...) instruction
3527   //
3528   // AArch64:
3529   // 1) Table switch using adr/add/br instructions (fast_version)
3530   // 2) Table switch using adr/ldr/br instructions
3531   //
3532   // The first version requires a fixed-size code block for each case and
3533   // cannot be used when RewriteBytecodes, VerifyOops or
3534   // ZapHighNonSignificantBits is enabled.
3535 
3536   // Log2 of the size (in instructions) of the fixed-size code block for fast_version
3537   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3538   const int max_block_size = 1 << log_max_block_size;
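       // Note: the putfield/putstatic cases are longer than the getfield ones (each case
       // also pops the value and, for putfield, the object), hence the larger block size here.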
3539 
3540   // Decide if fast version is enabled
3541   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3542 
3543   assert(number_of_states == 10, "number of tos states should be equal to 10");
3544 
3545   // itos case is frequent and is moved outside table switch
3546   __ cmp(Rflags, itos);
3547 
3548 #ifdef AARCH64
3549   __ b(Lint, eq);
3550 
3551   if (fast_version) {
3552     __ adr(Rtemp, Lbtos);
3553     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3554     __ br(Rtemp);
3555   } else {
3556     __ adr(Rtemp, Ltable);
3557     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3558     __ br(Rtemp);
3559   }
3560 #else
3561   // table switch by type
3562   if (fast_version) {
3563     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3564   } else  {
3565     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3566   }
3567 
3568   // jump to itos case
3569   __ b(Lint);
3570 #endif // AARCH64
3571 
3572   // table with addresses for slow version
3573   if (fast_version) {
3574     // nothing to do
3575   } else  {
3576     AARCH64_ONLY(__ align(wordSize));
3577     __ bind(Ltable);
3578     __ emit_address(Lbtos);
3579     __ emit_address(Lztos);
3580     __ emit_address(Lctos);
3581     __ emit_address(Lstos);
3582     __ emit_address(Litos);
3583     __ emit_address(Lltos);
3584     __ emit_address(Lftos);
3585     __ emit_address(Ldtos);
3586     __ emit_address(Latos);
3587   }
3588 
3589 #ifdef ASSERT
3590   int seq = 0;
3591 #endif
3592   // btos
3593   {
3594     assert(btos == seq++, "btos has unexpected value");
3595     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3596     __ bind(Lbtos);
3597     __ pop(btos);
3598     if (!is_static) pop_and_check_object(Robj);
3599     __ strb(R0_tos, Address(Robj, Roffset));
3600     if (!is_static && rc == may_rewrite) {
3601       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3602     }
3603     __ b(Done);
3604   }
3605 
3606   // ztos
3607   {
3608     assert(ztos == seq++, "ztos has unexpected value");
3609     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3610     __ bind(Lztos);
3611     __ pop(ztos);
3612     if (!is_static) pop_and_check_object(Robj);
3613     __ and_32(R0_tos, R0_tos, 1);
3614     __ strb(R0_tos, Address(Robj, Roffset));
3615     if (!is_static && rc == may_rewrite) {
3616       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3617     }
3618     __ b(Done);
3619   }
3620 
3621   // ctos
3622   {
3623     assert(ctos == seq++, "ctos has unexpected value");
3624     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3625     __ bind(Lctos);
3626     __ pop(ctos);
3627     if (!is_static) pop_and_check_object(Robj);
3628     __ strh(R0_tos, Address(Robj, Roffset));
3629     if (!is_static && rc == may_rewrite) {
3630       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3631     }
3632     __ b(Done);
3633   }
3634 
3635   // stos
3636   {
3637     assert(stos == seq++, "stos has unexpected value");
3638     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3639     __ bind(Lstos);
3640     __ pop(stos);
3641     if (!is_static) pop_and_check_object(Robj);
3642     __ strh(R0_tos, Address(Robj, Roffset));
3643     if (!is_static && rc == may_rewrite) {
3644       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3645     }
3646     __ b(Done);
3647   }
3648 
3649   // itos
3650   {
3651     assert(itos == seq++, "itos has unexpected value");
3652     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3653     __ bind(Litos);
3654     __ b(shouldNotReachHere);
3655   }
3656 
3657   // ltos
3658   {
3659     assert(ltos == seq++, "ltos has unexpected value");
3660     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3661     __ bind(Lltos);
3662     __ pop(ltos);
3663     if (!is_static) pop_and_check_object(Robj);
3664 #ifdef AARCH64
3665     __ str(R0_tos, Address(Robj, Roffset));
3666 #else
3667     __ add(Roffset, Robj, Roffset);
3668     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3669 #endif // AARCH64
3670     if (!is_static && rc == may_rewrite) {
3671       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3672     }
3673     __ b(Done);
3674   }
3675 
3676   // ftos
3677   {
3678     assert(ftos == seq++, "ftos has unexpected value");
3679     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3680     __ bind(Lftos);
3681     // floats and ints are placed on stack in the same way, so
3682     // we can use pop(itos) to transfer value without using VFP
3683     __ pop(itos);
3684     if (!is_static) pop_and_check_object(Robj);
3685     __ str_32(R0_tos, Address(Robj, Roffset));
3686     if (!is_static && rc == may_rewrite) {
3687       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3688     }
3689     __ b(Done);
3690   }
3691 
3692   // dtos
3693   {
3694     assert(dtos == seq++, "dtos has unexpected value");
3695     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3696     __ bind(Ldtos);
3697     // doubles and longs are placed on stack in the same way, so
3698     // we can use pop(ltos) to transfer value without using VFP
3699     __ pop(ltos);
3700     if (!is_static) pop_and_check_object(Robj);
3701 #ifdef AARCH64
3702     __ str(R0_tos, Address(Robj, Roffset));
3703 #else
3704     __ add(Rtemp, Robj, Roffset);
3705     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3706 #endif // AARCH64
3707     if (!is_static && rc == may_rewrite) {
3708       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3709     }
3710     __ b(Done);
3711   }
3712 
3713   // atos
3714   {
3715     assert(atos == seq++, "atos has unexpected value");
3716     __ bind(Latos);
3717     __ pop(atos);
3718     if (!is_static) pop_and_check_object(Robj);
3719     // Store into the field
3720     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3721     if (!is_static && rc == may_rewrite) {
3722       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3723     }
3724     __ b(Done);
3725   }
3726 
3727   __ bind(shouldNotReachHere);
3728   __ should_not_reach_here();
3729 
3730   // itos case is frequent and is moved outside table switch
3731   __ bind(Lint);
3732   __ pop(itos);
3733   if (!is_static) pop_and_check_object(Robj);
3734   __ str_32(R0_tos, Address(Robj, Roffset));
3735   if (!is_static && rc == may_rewrite) {
3736     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3737   }
3738 
3739   __ bind(Done);
3740 
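       // Barrier rationale (per the usual JMM rules):
       //  - a volatile store must be followed by StoreLoad so it cannot be reordered with
       //    a subsequent volatile load;
       //  - a final field store needs StoreStore before the object reference is published,
       //    so that a reader which sees the reference also sees the final field values.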
3741   if (gen_volatile_check) {
3742     Label notVolatile;
3743     if (is_static) {
3744       // Just check for volatile. Memory barrier for static final field
3745       // is handled by class initialization.
3746       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3747       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3748       __ bind(notVolatile);
3749     } else {
3750       // Check for volatile field and final field
3751       Label skipMembar;
3752 
3753       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3754                        1 << ConstantPoolCacheEntry::is_final_shift);
3755       __ b(skipMembar, eq);
3756 
3757       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3758 
3759       // StoreLoad barrier after volatile field write
3760       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3761       __ b(skipMembar);
3762 
3763       // StoreStore barrier after final field write
3764       __ bind(notVolatile);
3765       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3766 
3767       __ bind(skipMembar);
3768     }
3769   }
3770 
3771 }
3772 
3773 void TemplateTable::putfield(int byte_no) {
3774   putfield_or_static(byte_no, false);
3775 }
3776 
3777 void TemplateTable::nofast_putfield(int byte_no) {
3778   putfield_or_static(byte_no, false, may_not_rewrite);
3779 }
3780 
3781 void TemplateTable::putstatic(int byte_no) {
3782   putfield_or_static(byte_no, true);
3783 }
3784 
3785 
3786 void TemplateTable::jvmti_post_fast_field_mod() {
3787   // This version of jvmti_post_fast_field_mod() is not used on ARM
3788   Unimplemented();
3789 }
3790 
3791 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3792 // but preserves tosca with the given state.
3793 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3794   if (__ can_post_field_modification()) {
3795     // Check to see if a field modification watch has been set before we take
3796     // the time to call into the VM.
3797     Label done;
3798 
3799     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3800     __ cbz(R2, done);
3801 
3802     __ pop_ptr(R3);               // copy the object pointer from tos
3803     __ verify_oop(R3);
3804     __ push_ptr(R3);              // put the object pointer back on tos
3805 
3806     __ push(state);               // save value on the stack
3807 
3808     // access constant pool cache entry
3809     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3810 
3811     __ mov(R1, R3);
3812     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3813     __ mov(R3, Rstack_top); // put tos addr into R3
3814 
3815     // R1: object pointer copied above
3816     // R2: cache entry pointer
3817     // R3: jvalue object on the stack
3818     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3819 
3820     __ pop(state);                // restore value
3821 
3822     __ bind(done);
3823   }
3824 }
3825 
3826 
3827 void TemplateTable::fast_storefield(TosState state) {
3828   transition(state, vtos);
3829 
3830   ByteSize base = ConstantPoolCache::base_offset();
3831 
3832   jvmti_post_fast_field_mod(state);
3833 
3834   const Register Rcache  = R2_tmp;
3835   const Register Rindex  = R3_tmp;
3836   const Register Roffset = R3_tmp;
3837   const Register Rflags  = Rtmp_save0; // R4/R19
3838   const Register Robj    = R5_tmp;
3839 
3840   const bool gen_volatile_check = os::is_MP();
3841 
3842   // access constant pool cache
3843   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3844 
3845   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3846 
3847   if (gen_volatile_check) {
3848     // load flags to test volatile
3849     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3850   }
3851 
3852   // replace index with field offset from cache entry
3853   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3854 
3855   if (gen_volatile_check) {
3856     // Check for volatile store
3857     Label notVolatile;
3858     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3859 
3860     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
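         // (A store-release such as stlr to the field address would provide the required
         // StoreStore|LoadStore ordering without a separate barrier instruction.)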
3861     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3862 
3863     __ bind(notVolatile);
3864   }
3865 
3866   // Get object from stack
3867   pop_and_check_object(Robj);
3868 
3869   // access field
3870   switch (bytecode()) {
3871     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3872                                      // fall through
3873     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3874     case Bytecodes::_fast_sputfield: // fall through
3875     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3876     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3877 #ifdef AARCH64
3878     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3879     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3880     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3881 #else
3882     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3883                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3884 
3885 #ifdef __SOFTFP__
3886     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3887     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3888                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3889 #else
3890     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3891                                      __ fsts(S0_tos, Address(Robj));          break;
3892     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3893                                      __ fstd(D0_tos, Address(Robj));          break;
3894 #endif // __SOFTFP__
3895 #endif // AARCH64
3896 
3897     case Bytecodes::_fast_aputfield:
3898       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3899       break;
3900 
3901     default:
3902       ShouldNotReachHere();
3903   }
3904 
3905   if (gen_volatile_check) {
3906     Label notVolatile;
3907     Label skipMembar;
3908     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3909                    1 << ConstantPoolCacheEntry::is_final_shift);
3910     __ b(skipMembar, eq);
3911 
3912     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3913 
3914     // StoreLoad barrier after volatile field write
3915     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3916     __ b(skipMembar);
3917 
3918     // StoreStore barrier after final field write
3919     __ bind(notVolatile);
3920     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3921 
3922     __ bind(skipMembar);
3923   }
3924 }
3925 
3926 
3927 void TemplateTable::fast_accessfield(TosState state) {
3928   transition(atos, state);
3929 
3930   // do the JVMTI work here to avoid disturbing the register state below
3931   if (__ can_post_field_access()) {
3932     // Check to see if a field access watch has been set before we take
3933     // the time to call into the VM.
3934     Label done;
3935     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3936     __ cbz(R2, done);
3937     // access constant pool cache entry
3938     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3939     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3940     __ verify_oop(R0_tos);
3941     __ mov(R1, R0_tos);
3942     // R1: object pointer copied above
3943     // R2: cache entry pointer
3944     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3945     __ pop_ptr(R0_tos);   // restore object pointer
3946 
3947     __ bind(done);
3948   }
3949 
3950   const Register Robj    = R0_tos;
3951   const Register Rcache  = R2_tmp;
3952   const Register Rflags  = R2_tmp;
3953   const Register Rindex  = R3_tmp;
3954   const Register Roffset = R3_tmp;
3955 
3956   const bool gen_volatile_check = os::is_MP();
3957 
3958   // access constant pool cache
3959   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3960   // replace index with field offset from cache entry
3961   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3962   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3963 
3964   if (gen_volatile_check) {
3965     // load flags to test volatile
3966     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3967   }
3968 
3969   __ verify_oop(Robj);
3970   __ null_check(Robj, Rtemp);
3971 
3972   // access field
3973   switch (bytecode()) {
3974     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3975     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3976     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3977     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3978 #ifdef AARCH64
3979     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3980     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3981     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3982 #else
3983     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3984                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3985 #ifdef __SOFTFP__
3986     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3987     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3988                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3989 #else
3990     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3991     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3992 #endif // __SOFTFP__
3993 #endif // AARCH64
3994     case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3995     default:
3996       ShouldNotReachHere();
3997   }
3998 
3999   if (gen_volatile_check) {
4000     // Check for volatile load
4001     Label notVolatile;
4002     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4003 
4004     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
4005     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4006 
4007     __ bind(notVolatile);
4008   }
4009 }
4010 
4011 
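     // fast_xaccess implements the rewritten bytecodes (e.g. _fast_iaccess_0, _fast_aaccess_0,
     // _fast_faccess_0) that fuse an aload_0 with an immediately following getfield of the
     // receiver; this is why the cp cache index is read at bcp offset 2 below.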
4012 void TemplateTable::fast_xaccess(TosState state) {
4013   transition(vtos, state);
4014 
4015   const Register Robj = R1_tmp;
4016   const Register Rcache = R2_tmp;
4017   const Register Rindex = R3_tmp;
4018   const Register Roffset = R3_tmp;
4019   const Register Rflags = R4_tmp;
4020   Label done;
4021 
4022   // get receiver
4023   __ ldr(Robj, aaddress(0));
4024 
4025   // access constant pool cache
4026   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
4027   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
4028   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
4029 
4030   const bool gen_volatile_check = os::is_MP();
4031 
4032   if (gen_volatile_check) {
4033     // load flags to test volatile
4034     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
4035   }
4036 
4037   // make sure exception is reported in correct bcp range (getfield is next instruction)
4038   __ add(Rbcp, Rbcp, 1);
4039   __ null_check(Robj, Rtemp);
4040   __ sub(Rbcp, Rbcp, 1);
4041 
4042 #ifdef AARCH64
4043   if (gen_volatile_check) {
4044     Label notVolatile;
4045     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4046 
4047     __ add(Rtemp, Robj, Roffset);
4048 
4049     if (state == itos) {
4050       __ ldar_w(R0_tos, Rtemp);
4051     } else if (state == atos) {
4052       if (UseCompressedOops) {
4053         __ ldar_w(R0_tos, Rtemp);
4054         __ decode_heap_oop(R0_tos);
4055       } else {
4056         __ ldar(R0_tos, Rtemp);
4057       }
4058       __ verify_oop(R0_tos);
4059     } else if (state == ftos) {
4060       __ ldar_w(R0_tos, Rtemp);
4061       __ fmov_sw(S0_tos, R0_tos);
4062     } else {
4063       ShouldNotReachHere();
4064     }
4065     __ b(done);
4066 
4067     __ bind(notVolatile);
4068   }
4069 #endif // AARCH64
4070 
4071   if (state == itos) {
4072     __ ldr_s32(R0_tos, Address(Robj, Roffset));
4073   } else if (state == atos) {
4074     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
4075     __ verify_oop(R0_tos);
4076   } else if (state == ftos) {
4077 #ifdef AARCH64
4078     __ ldr_s(S0_tos, Address(Robj, Roffset));
4079 #else
4080 #ifdef __SOFTFP__
4081     __ ldr(R0_tos, Address(Robj, Roffset));
4082 #else
4083     __ add(Roffset, Robj, Roffset);
4084     __ flds(S0_tos, Address(Roffset));
4085 #endif // __SOFTFP__
4086 #endif // AARCH64
4087   } else {
4088     ShouldNotReachHere();
4089   }
4090 
4091 #ifndef AARCH64
4092   if (gen_volatile_check) {
4093     // Check for volatile load
4094     Label notVolatile;
4095     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4096 
4097     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4098 
4099     __ bind(notVolatile);
4100   }
4101 #endif // !AARCH64
4102 
4103   __ bind(done);
4104 }
4105 
4106 
4107 
4108 //----------------------------------------------------------------------------------------------------
4109 // Calls
4110 
4111 void TemplateTable::count_calls(Register method, Register temp) {
4112   // implemented elsewhere
4113   ShouldNotReachHere();
4114 }
4115 
4116 
4117 void TemplateTable::prepare_invoke(int byte_no,
4118                                    Register method,  // linked method (or i-klass)
4119                                    Register index,   // itable index, MethodType, etc.
4120                                    Register recv,    // if caller wants to see it
4121                                    Register flags    // if caller wants to test it
4122                                    ) {
4123   // determine flags
4124   const Bytecodes::Code code = bytecode();
4125   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4126   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4127   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4128   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4129   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4130   const bool load_receiver       = (recv != noreg);
4131   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4132   assert(recv  == noreg || recv  == R2, "");
4133   assert(flags == noreg || flags == R3, "");
4134 
4135   // setup registers & access constant pool cache
4136   if (recv  == noreg)  recv  = R2;
4137   if (flags == noreg)  flags = R3;
4138   const Register temp = Rtemp;
4139   const Register ret_type = R1_tmp;
4140   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4141 
4142   // save 'interpreter return address'
4143   __ save_bcp();
4144 
4145   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4146 
4147   // maybe push extra argument
4148   if (is_invokedynamic || is_invokehandle) {
4149     Label L_no_push;
4150     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4151     __ mov(temp, index);
4152     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4153     __ load_resolved_reference_at_index(index, temp);
4154     __ verify_oop(index);
4155     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4156     __ bind(L_no_push);
4157   }
4158 
4159   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4160   if (load_receiver) {
4161     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4162     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4163     __ ldr(recv, recv_addr);
4164     __ verify_oop(recv);
4165   }
4166 
4167   // compute return type
4168   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4169   // Make sure we don't need to mask flags after the above shift
4170   ConstantPoolCacheEntry::verify_tos_state_shift();
4171   // load return address
4172   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4173     __ mov_slow(temp, table);
4174     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4175   }
4176 }
4177 
4178 
4179 void TemplateTable::invokevirtual_helper(Register index,
4180                                          Register recv,
4181                                          Register flags) {
4182 
4183   const Register recv_klass = R2_tmp;
4184 
4185   assert_different_registers(index, recv, flags, Rtemp);
4186   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4187 
4188   // Test for an invoke of a final method
4189   Label notFinal;
4190   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4191 
4192   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4193 
4194   // do the call - the index is actually the method to call
4195 
4196   // It's final, need a null check here!
4197   __ null_check(recv, Rtemp);
4198 
4199   // profile this call
4200   __ profile_final_call(R0_tmp);
4201 
4202   __ jump_from_interpreted(Rmethod);
4203 
4204   __ bind(notFinal);
4205 
4206   // get receiver klass
4207   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4208   __ load_klass(recv_klass, recv);
4209 
4210   // profile this call
4211   __ profile_virtual_call(R0_tmp, recv_klass);
4212 
4213   // get target Method* & entry point
4214   const int base = in_bytes(Klass::vtable_start_offset());
4215   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
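       // Roughly, the next two instructions compute:
       //   Rmethod = *(Method**)(recv_klass + vtable_start_offset()
       //                         + index * wordSize + vtableEntry::method_offset_in_bytes())
       // i.e. the vtable is an embedded array of one-word entries holding Method* pointers.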
4216   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4217   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4218   __ jump_from_interpreted(Rmethod);
4219 }
4220 
4221 void TemplateTable::invokevirtual(int byte_no) {
4222   transition(vtos, vtos);
4223   assert(byte_no == f2_byte, "use this argument");
4224 
4225   const Register Rrecv  = R2_tmp;
4226   const Register Rflags = R3_tmp;
4227 
4228   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4229 
4230   // Rmethod: index
4231   // Rrecv:   receiver
4232   // Rflags:  flags
4233   // LR:      return address
4234 
4235   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4236 }
4237 
4238 
4239 void TemplateTable::invokespecial(int byte_no) {
4240   transition(vtos, vtos);
4241   assert(byte_no == f1_byte, "use this argument");
4242   const Register Rrecv  = R2_tmp;
4243   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4244   __ verify_oop(Rrecv);
4245   __ null_check(Rrecv, Rtemp);
4246   // do the call
4247   __ profile_call(Rrecv);
4248   __ jump_from_interpreted(Rmethod);
4249 }
4250 
4251 
4252 void TemplateTable::invokestatic(int byte_no) {
4253   transition(vtos, vtos);
4254   assert(byte_no == f1_byte, "use this argument");
4255   prepare_invoke(byte_no, Rmethod);
4256   // do the call
4257   __ profile_call(R2_tmp);
4258   __ jump_from_interpreted(Rmethod);
4259 }
4260 
4261 
4262 void TemplateTable::fast_invokevfinal(int byte_no) {
4263   transition(vtos, vtos);
4264   assert(byte_no == f2_byte, "use this argument");
4265   __ stop("fast_invokevfinal is not used on ARM");
4266 }
4267 
4268 
4269 void TemplateTable::invokeinterface(int byte_no) {
4270   transition(vtos, vtos);
4271   assert(byte_no == f1_byte, "use this argument");
4272 
4273   const Register Ritable = R1_tmp;
4274   const Register Rrecv   = R2_tmp;
4275   const Register Rinterf = R5_tmp;
4276   const Register Rindex  = R4_tmp;
4277   const Register Rflags  = R3_tmp;
4278   const Register Rklass  = R3_tmp;
4279 
4280   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4281 
4282   // Special case of invokeinterface called for virtual method of
4283   // java.lang.Object.  See cpCacheOop.cpp for details.
4284   // This code isn't produced by javac, but could be produced by
4285   // another compliant java compiler.
4286   Label notMethod;
4287   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4288 
4289   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4290   __ bind(notMethod);
4291 
4292   // Get receiver klass into Rklass - also a null check
4293   __ load_klass(Rklass, Rrecv);
4294 
4295   Label no_such_interface;
4296 
4297   // Receiver subtype check against REFC.
4298   __ lookup_interface_method(// inputs: rec. class, interface
4299                              Rklass, Rinterf, noreg,
4300                              // outputs:  scan temp. reg1, scan temp. reg2
4301                              noreg, Ritable, Rtemp,
4302                              no_such_interface);
4303 
4304   // profile this call
4305   __ profile_virtual_call(R0_tmp, Rklass);
4306 
4307   // Get declaring interface class from method
4308   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4309   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4310   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4311 
4312   // Get itable index from method
4313   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4314   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // itable_index_max is a small negative constant that cannot be encoded as an arm32 immediate
4315   __ neg(Rindex, Rtemp);
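       // Rindex now holds (Method::itable_index_max - <stored value>), which, assuming the
       // usual encoding of itable indices in Method, recovers the declared itable slot.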
4316 
4317   __ lookup_interface_method(// inputs: rec. class, interface
4318                              Rklass, Rinterf, Rindex,
4319                              // outputs:  scan temp. reg1, scan temp. reg2
4320                              Rmethod, Ritable, Rtemp,
4321                              no_such_interface);
4322 
4323   // Rmethod: Method* to call
4324 
4325   // Check for abstract method error
4326   // Note: This should be done more efficiently via a throw_abstract_method_error
4327   //       interpreter entry point and a conditional jump to it in case of a null
4328   //       method.
4329   { Label L;
4330     __ cbnz(Rmethod, L);
4331     // throw exception
4332     // note: must restore interpreter registers to canonical
4333     //       state for exception handling to work correctly!
4334     __ restore_method();
4335     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4336     // the call_VM checks for exception, so we should never return here.
4337     __ should_not_reach_here();
4338     __ bind(L);
4339   }
4340 
4341   // do the call
4342   __ jump_from_interpreted(Rmethod);
4343 
4344   // throw exception
4345   __ bind(no_such_interface);
4346   __ restore_method();
4347   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4348   // the call_VM checks for exception, so we should never return here.
4349   __ should_not_reach_here();
4350 }
4351 
4352 void TemplateTable::invokehandle(int byte_no) {
4353   transition(vtos, vtos);
4354 
4355   // TODO-AARCH64 review register usage
4356   const Register Rrecv  = R2_tmp;
4357   const Register Rmtype = R4_tmp;
4358   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4359 
4360   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4361   __ null_check(Rrecv, Rtemp);
4362 
4363   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4364   // Rmethod: MH.invokeExact_MT method (from f2)
4365 
4366   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4367 
4368   // do the call
4369   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4370   __ mov(Rmethod, R5_method);
4371   __ jump_from_interpreted(Rmethod);
4372 }
4373 
4374 void TemplateTable::invokedynamic(int byte_no) {
4375   transition(vtos, vtos);
4376 
4377   // TODO-AARCH64 review register usage
4378   const Register Rcallsite = R4_tmp;
4379   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4380 
4381   prepare_invoke(byte_no, R5_method, Rcallsite);
4382 
4383   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4384   // Rmethod:   MH.linkToCallSite method (from f2)
4385 
4386   // Note:  Rcallsite is already pushed by prepare_invoke
4387 
4388   if (ProfileInterpreter) {
4389     __ profile_call(R2_tmp);
4390   }
4391 
4392   // do the call
4393   __ mov(Rmethod, R5_method);
4394   __ jump_from_interpreted(Rmethod);
4395 }
4396 
4397 //----------------------------------------------------------------------------------------------------
4398 // Allocation
4399 
4400 void TemplateTable::_new() {
4401   transition(vtos, atos);
4402 
4403   const Register Robj   = R0_tos;
4404   const Register Rcpool = R1_tmp;
4405   const Register Rindex = R2_tmp;
4406   const Register Rtags  = R3_tmp;
4407   const Register Rsize  = R3_tmp;
4408 
4409   Register Rklass = R4_tmp;
4410   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4411   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4412 
4413   Label slow_case;
4414   Label done;
4415   Label initialize_header;
4416   Label initialize_object;  // including clearing the fields
4417 
4418   const bool allow_shared_alloc =
4419     Universe::heap()->supports_inline_contig_alloc();
4420 
4421   // Literals
4422   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4423 
4424   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4425   __ get_cpool_and_tags(Rcpool, Rtags);
4426 
4427   // Make sure the class we're about to instantiate has been resolved.
4428   // This is done before loading the InstanceKlass to be consistent with the order
4429   // in which the constant pool is updated (see ConstantPool::klass_at_put).
4430   const int tags_offset = Array<u1>::base_offset_in_bytes();
4431   __ add(Rtemp, Rtags, Rindex);
4432 
4433 #ifdef AARCH64
4434   __ add(Rtemp, Rtemp, tags_offset);
4435   __ ldarb(Rtemp, Rtemp);
4436 #else
4437   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4438 
4439   // use Rklass as a scratch
4440   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4441 #endif // AARCH64
4442 
4443   // get InstanceKlass
4444   __ cmp(Rtemp, JVM_CONSTANT_Class);
4445   __ b(slow_case, ne);
4446   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4447 
4448   // make sure klass is initialized and doesn't have a finalizer
4449   // first, check that klass is fully initialized
4450   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4451   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4452   __ b(slow_case, ne);
4453 
4454   // get instance_size in InstanceKlass (scaled to a count of bytes)
4455   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4456 
4457   // test to see if it has a finalizer or is malformed in some way
4458   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4459   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4460 
4461   // Allocate the instance:
4462   //  If TLAB is enabled:
4463   //    Try to allocate in the TLAB.
4464   //    If fails, go to the slow path.
4465   //  Else If inline contiguous allocations are enabled:
4466   //    Try to allocate in eden.
4467   //    If fails due to heap end, go to slow path.
4468   //
4469   //  If TLAB is enabled OR inline contiguous is enabled:
4470   //    Initialize the allocation.
4471   //    Exit.
4472   //
4473   //  Go to slow path.
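       //
       // Roughly, the TLAB path is a simple bump-the-pointer allocation:
       //   obj = thread->tlab_top;
       //   if (obj + size > thread->tlab_end) goto slow_case;
       //   thread->tlab_top = obj + size;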
4474   if (UseTLAB) {
4475     const Register Rtlab_top = R1_tmp;
4476     const Register Rtlab_end = R2_tmp;
4477     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4478 
4479     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4480     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4481     __ add(Rtlab_top, Robj, Rsize);
4482     __ cmp(Rtlab_top, Rtlab_end);
4483     __ b(slow_case, hi);
4484     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4485     if (ZeroTLAB) {
4486       // the fields have been already cleared
4487       __ b(initialize_header);
4488     } else {
4489       // initialize both the header and fields
4490       __ b(initialize_object);
4491     }
4492   } else {
4493     // Allocation in the shared Eden, if allowed.
4494     if (allow_shared_alloc) {
4495       const Register Rheap_top_addr = R2_tmp;
4496       const Register Rheap_top = R5_tmp;
4497       const Register Rheap_end = Rtemp;
4498       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4499 
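           // Roughly, this is a CAS loop over the global heap top:
           //   do { obj = *heap_top_addr; new_top = obj + size;
           //        if (new_top > heap_end) goto slow_case;
           //   } while (!CAS(heap_top_addr, obj, new_top));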
4500       // heap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4501       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4502 
4503       Label retry;
4504       __ bind(retry);
4505 
4506 #ifdef AARCH64
4507       __ ldxr(Robj, Rheap_top_addr);
4508 #else
4509       __ ldr(Robj, Address(Rheap_top_addr));
4510 #endif // AARCH64
4511 
4512       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4513       __ add(Rheap_top, Robj, Rsize);
4514       __ cmp(Rheap_top, Rheap_end);
4515       __ b(slow_case, hi);
4516 
4517       // Update heap top atomically.
4518       // If someone beats us on the allocation, try again, otherwise continue.
4519 #ifdef AARCH64
4520       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4521       __ cbnz_w(Rtemp2, retry);
4522 #else
4523       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4524       __ b(retry, ne);
4525 #endif // AARCH64
4526 
4527       __ incr_allocated_bytes(Rsize, Rtemp);
4528     }
4529   }
4530 
4531   if (UseTLAB || allow_shared_alloc) {
4532     const Register Rzero0 = R1_tmp;
4533     const Register Rzero1 = R2_tmp;
4534     const Register Rzero_end = R5_tmp;
4535     const Register Rzero_cur = Rtemp;
4536     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4537 
4538     // The object is initialized before the header.  If the object size is
4539     // zero, go directly to the header initialization.
4540     __ bind(initialize_object);
4541     __ subs(Rsize, Rsize, sizeof(oopDesc));
4542     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4543     __ b(initialize_header, eq);
4544 
4545 #ifdef ASSERT
4546     // make sure Rsize is a multiple of 8
4547     Label L;
4548     __ tst(Rsize, 0x07);
4549     __ b(L, eq);
4550     __ stop("object size is not multiple of 8 - adjust this code");
4551     __ bind(L);
4552 #endif
4553 
4554 #ifdef AARCH64
4555     {
4556       Label loop;
4557       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4558       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4559       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4560       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4561 
4562       // Zero by 2 words per iteration.
4563       __ bind(loop);
4564       __ subs(Rsize, Rsize, 2*wordSize);
4565       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4566       __ b(loop, gt);
4567     }
4568 #else
4569     __ mov(Rzero0, 0);
4570     __ mov(Rzero1, 0);
4571     __ add(Rzero_end, Rzero_cur, Rsize);
4572 
4573     // initialize remaining object fields: Rsize was a multiple of 8
4574     { Label loop;
4575       // loop is unrolled 2 times
4576       __ bind(loop);
4577       // #1
4578       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4579       __ cmp(Rzero_cur, Rzero_end);
4580       // #2
4581       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4582       __ cmp(Rzero_cur, Rzero_end, ne);
4583       __ b(loop, ne);
4584     }
4585 #endif // AARCH64
4586 
4587     // initialize object header only.
4588     __ bind(initialize_header);
4589     if (UseBiasedLocking) {
4590       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4591     } else {
4592       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4593     }
4594     // mark
4595     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4596 
4597     // klass
4598 #ifdef AARCH64
4599     __ store_klass_gap(Robj);
4600 #endif // AARCH64
4601     __ store_klass(Rklass, Robj); // blows Rklass:
4602     Rklass = noreg;
4603 
4604     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4605     if (DTraceAllocProbes) {
4606       // Trigger dtrace event for fastpath
4607       Label Lcontinue;
4608 
4609       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4610       __ cbz(Rtemp, Lcontinue);
4611 
4612       __ push(atos);
4613       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4614       __ pop(atos);
4615 
4616       __ bind(Lcontinue);
4617     }
4618 
4619     __ b(done);
4620   } else {
4621     // jump over literals
4622     __ b(slow_case);
4623   }
4624 
4625   if (allow_shared_alloc) {
4626     __ bind_literal(Lheap_top_addr);
4627   }
4628 
4629   // slow case
4630   __ bind(slow_case);
4631   __ get_constant_pool(Rcpool);
4632   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4633   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4634 
4635   // continue
4636   __ bind(done);
4637 
4638   // StoreStore barrier required after complete initialization
4639   // (headers + content zeroing), before the object may escape.
4640   __ membar(MacroAssembler::StoreStore, R1_tmp);
4641 }
4642 
4643 
4644 void TemplateTable::newarray() {
4645   transition(itos, atos);
4646   __ ldrb(R1, at_bcp(1));
4647   __ mov(R2, R0_tos);
4648   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4649   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4650 }
4651 
4652 
4653 void TemplateTable::anewarray() {
4654   transition(itos, atos);
4655   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4656   __ get_constant_pool(R1);
4657   __ mov(R3, R0_tos);
4658   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4659   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4660 }
4661 
4662 
4663 void TemplateTable::arraylength() {
4664   transition(atos, itos);
4665   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4666   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4667 }
4668 
4669 
4670 void TemplateTable::checkcast() {
4671   transition(atos, atos);
4672   Label done, is_null, quicked, resolved, throw_exception;
4673 
4674   const Register Robj = R0_tos;
4675   const Register Rcpool = R2_tmp;
4676   const Register Rtags = R3_tmp;
4677   const Register Rindex = R4_tmp;
4678   const Register Rsuper = R3_tmp;
4679   const Register Rsub   = R4_tmp;
4680   const Register Rsubtype_check_tmp1 = R1_tmp;
4681   const Register Rsubtype_check_tmp2 = LR_tmp;
4682 
4683   __ cbz(Robj, is_null);
4684 
4685   // Get cpool & tags index
4686   __ get_cpool_and_tags(Rcpool, Rtags);
4687   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4688 
4689   // See if bytecode has already been quicked
4690   __ add(Rtemp, Rtags, Rindex);
4691 #ifdef AARCH64
4692   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4693   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4694   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4695 #else
4696   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4697 #endif // AARCH64
4698 
4699   __ cmp(Rtemp, JVM_CONSTANT_Class);
4700 
4701 #ifndef AARCH64
4702   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4703 #endif // !AARCH64
4704 
4705   __ b(quicked, eq);
4706 
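       // Not yet quicked: call into the runtime to resolve the class. After resolution the
       // tag becomes JVM_CONSTANT_Class and the resolved Klass* is returned in vm_result_2,
       // so subsequent executions take the branch to 'quicked'.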
4707   __ push(atos);
4708   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4709   // vm_result_2 has metadata result
4710   __ get_vm_result_2(Rsuper, Robj);
4711   __ pop_ptr(Robj);
4712   __ b(resolved);
4713 
4714   __ bind(throw_exception);
4715   // Come here on failure of subtype check
4716   __ profile_typecheck_failed(R1_tmp);
4717   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4718   __ b(Interpreter::_throw_ClassCastException_entry);
4719 
4720   // Get superklass in Rsuper and subklass in Rsub
4721   __ bind(quicked);
4722   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4723 
4724   __ bind(resolved);
4725   __ load_klass(Rsub, Robj);
4726 
4727   // Generate subtype check. Blows both tmps and Rtemp.
4728   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4729   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4730 
4731   // Come here on success
4732 
4733   // Collect counts on whether this check-cast sees NULLs a lot or not.
4734   if (ProfileInterpreter) {
4735     __ b(done);
4736     __ bind(is_null);
4737     __ profile_null_seen(R1_tmp);
4738   } else {
4739     __ bind(is_null);   // same as 'done'
4740   }
4741   __ bind(done);
4742 }
4743 
4744 
4745 void TemplateTable::instanceof() {
4746   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4747   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4748 
4749   transition(atos, itos);
4750   Label done, is_null, not_subtype, quicked, resolved;
4751 
4752   const Register Robj = R0_tos;
4753   const Register Rcpool = R2_tmp;
4754   const Register Rtags = R3_tmp;
4755   const Register Rindex = R4_tmp;
4756   const Register Rsuper = R3_tmp;
4757   const Register Rsub   = R4_tmp;
4758   const Register Rsubtype_check_tmp1 = R0_tmp;
4759   const Register Rsubtype_check_tmp2 = R1_tmp;
4760 
4761   __ cbz(Robj, is_null);
4762 
4763   __ load_klass(Rsub, Robj);
4764 
4765   // Get cpool & tags index
4766   __ get_cpool_and_tags(Rcpool, Rtags);
4767   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4768 
4769   // See if bytecode has already been quicked
4770   __ add(Rtemp, Rtags, Rindex);
4771 #ifdef AARCH64
4772   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4773   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4774   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4775 #else
4776   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4777 #endif // AARCH64
4778   __ cmp(Rtemp, JVM_CONSTANT_Class);
4779 
4780 #ifndef AARCH64
4781   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4782 #endif // !AARCH64
4783 
4784   __ b(quicked, eq);
4785 
4786   __ push(atos);
4787   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4788   // vm_result_2 has metadata result
4789   __ get_vm_result_2(Rsuper, Robj);
4790   __ pop_ptr(Robj);
4791   __ b(resolved);
4792 
4793   // Get superklass in Rsuper and subklass in Rsub
4794   __ bind(quicked);
4795   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4796 
4797   __ bind(resolved);
4798   __ load_klass(Rsub, Robj);
4799 
4800   // Generate subtype check. Blows both tmps and Rtemp.
4801   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4802 
4803   // Come here on success
4804   __ mov(R0_tos, 1);
4805   __ b(done);
4806 
4807   __ bind(not_subtype);
4808   // Come here on failure
4809   __ profile_typecheck_failed(R1_tmp);
4810   __ mov(R0_tos, 0);
4811 
4812   // Collect counts on whether this test sees NULLs a lot or not.
4813   if (ProfileInterpreter) {
4814     __ b(done);
4815     __ bind(is_null);
4816     __ profile_null_seen(R1_tmp);
4817   } else {
4818     __ bind(is_null);   // same as 'done'
4819   }
4820   __ bind(done);
4821 }
4822 
4823 
4824 //----------------------------------------------------------------------------------------------------
4825 // Breakpoints
4826 void TemplateTable::_breakpoint() {
4827 
4828   // Note: We get here even if we are single stepping.
4829   // jbug insists on setting breakpoints at every bytecode
4830   // even if we are in single step mode.
4831 
4832   transition(vtos, vtos);
4833 
4834   // get the unpatched byte code
4835   __ mov(R1, Rmethod);
4836   __ mov(R2, Rbcp);
4837   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
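  // The original bytecode is returned in R0 as a 32-bit int; preserve it in Rtmp_save0
  // across the breakpoint-event call below (sign-extended into the full register on AArch64).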
4838 #ifdef AARCH64
4839   __ sxtw(Rtmp_save0, R0);
4840 #else
4841   __ mov(Rtmp_save0, R0);
4842 #endif // AARCH64
4843 
4844   // post the breakpoint event
4845   __ mov(R1, Rmethod);
4846   __ mov(R2, Rbcp);
4847   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4848 
4849   // complete the execution of original bytecode
4850   __ mov(R3_bytecode, Rtmp_save0);
4851   __ dispatch_only_normal(vtos);
4852 }
4853 
4854 
4855 //----------------------------------------------------------------------------------------------------
4856 // Exceptions
4857 
4858 void TemplateTable::athrow() {
4859   transition(atos, vtos);
4860   __ mov(Rexception_obj, R0_tos);
4861   __ null_check(Rexception_obj, Rtemp);
4862   __ b(Interpreter::throw_exception_entry());
4863 }
4864 
4865 
4866 //----------------------------------------------------------------------------------------------------
4867 // Synchronization
4868 //
4869 // Note: monitorenter & exit are symmetric routines, which is reflected
4870 //       in the assembly code structure as well
4871 //
4872 // Stack layout:
4873 //
4874 // [expressions  ] <--- Rstack_top        = expression stack top
4875 // ..
4876 // [expressions  ]
4877 // [monitor entry] <--- monitor block top = expression stack bot
4878 // ..
4879 // [monitor entry]
4880 // [frame data   ] <--- monitor block bot
4881 // ...
4882 // [saved FP     ] <--- FP
4883 
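// A monitor entry is a BasicObjectLock (a BasicLock holding the displaced mark word,
// plus the object pointer), so the entry_size computed below from
// frame::interpreter_frame_monitor_size() is typically two machine words.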
4884 
4885 void TemplateTable::monitorenter() {
4886   transition(atos, vtos);
4887 
4888   const Register Robj = R0_tos;
4889   const Register Rentry = R1_tmp;
4890 
4891   // check for NULL object
4892   __ null_check(Robj, Rtemp);
4893 
4894   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4895   assert(entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4896   Label allocate_monitor, allocated;
4897 
4898   // initialize entry pointer
4899   __ mov(Rentry, 0);                             // points to free slot or NULL
4900 
4901   // find a free slot in the monitor block (result in Rentry)
4902   { Label loop, exit;
4903     const Register Rcur = R2_tmp;
4904     const Register Rcur_obj = Rtemp;
4905     const Register Rbottom = R3_tmp;
4906     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4907 
4908     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4909                                  // points to current entry, starting with top-most entry
4910     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4911                                  // points to word before bottom of monitor block
4912 
4913     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4914 #ifndef AARCH64
4915     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4916                                                  // prefetch monitor's object for the first iteration
4917 #endif // !AARCH64
4918     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4919 
4920     __ bind(loop);
4921 #ifdef AARCH64
4922     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4923 #endif // AARCH64
4924     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4925     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4926 
4927     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4928     __ b(exit, eq);                              // if same object then stop searching
4929 
4930     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4931 
4932     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4933 #ifndef AARCH64
4934     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4935                                                  // prefetch monitor's object for the next iteration
4936 #endif // !AARCH64
4937     __ b(loop, ne);                              // if not at bottom then check this entry
4938     __ bind(exit);
4939   }
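  // Rentry now points to a reusable free slot if one was seen during the search;
  // otherwise it is still NULL and a fresh entry is carved out at allocate_monitor.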
4940 
4941   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4942 
4943   __ bind(allocate_monitor);
4944 
4945   // allocate one if there's no free slot
4946   { Label loop;
4947     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4948 
4949     // 1. compute new pointers
4950 
4951 #ifdef AARCH64
4952     __ check_extended_sp(Rtemp);
4953     __ sub(SP, SP, entry_size);                  // adjust extended SP
4954     __ mov(Rtemp, SP);
4955     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4956 #endif // AARCH64
4957 
4958     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4959                                                  // old monitor block top / expression stack bottom
4960 
4961     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4962     __ check_stack_top_on_expansion();
4963 
4964     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4965 
4966     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4967 
4968     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4969                                                  // set new monitor block top
4970 
4971     // 2. move expression stack contents
4972 
4973     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4974 #ifndef AARCH64
4975     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4976 #endif // !AARCH64
4977     __ b(allocated, eq);
4978 
4979     __ bind(loop);
4980 #ifdef AARCH64
4981     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4982 #endif // AARCH64
4983     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4984                                                             // and advance to next word
4985     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4986 #ifndef AARCH64
4987     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4988 #endif // !AARCH64
4989     __ b(loop, ne);                                         // if not at bottom then copy next word
4990   }
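  // The expression stack contents have been shifted down by entry_size, and Rentry
  // points to the newly created entry at the new monitor block top.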
4991 
4992   // call run-time routine
4993 
4994   // Rentry: points to monitor entry
4995   __ bind(allocated);
4996 
4997   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4998   // The object has already been popped from the stack, so the expression stack looks correct.
4999   __ add(Rbcp, Rbcp, 1);
5000 
5001   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
5002   __ lock_object(Rentry);
5003 
5004   // check to make sure this monitor doesn't cause stack overflow after locking
5005   __ save_bcp();  // in case of exception
5006   __ arm_stack_overflow_check(0, Rtemp);
5007 
5008   // The bcp has already been incremented. Just need to dispatch to next instruction.
5009   __ dispatch_next(vtos);
5010 }
5011 
5012 
5013 void TemplateTable::monitorexit() {
5014   transition(atos, vtos);
5015 
5016   const Register Robj = R0_tos;
5017   const Register Rcur = R1_tmp;
5018   const Register Rbottom = R2_tmp;
5019   const Register Rcur_obj = Rtemp;
5020 
5021   // check for NULL object
5022   __ null_check(Robj, Rtemp);
5023 
5024   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
5025   Label found, throw_exception;
5026 
5027   // find matching slot
5028   { Label loop;
5029     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
5030 
5031     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
5032                                  // points to current entry, starting with top-most entry
5033     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
5034                                  // points to word before bottom of monitor block
5035 
5036     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5037 #ifndef AARCH64
5038     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5039                                                  // prefetch monitor's object for the first iteration
5040 #endif // !AARCH64
5041     __ b(throw_exception, eq);                   // throw exception if there are no monitors
5042 
5043     __ bind(loop);
5044 #ifdef AARCH64
5045     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
5046 #endif // AARCH64
5047     // check if current entry is for same object
5048     __ cmp(Rcur_obj, Robj);
5049     __ b(found, eq);                             // if same object then stop searching
5050     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
5051     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5052 #ifndef AARCH64
5053     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5054 #endif // !AARCH64
5055     __ b (loop, ne);                             // if not at bottom then check this entry
5056   }
5057 
5058   // error handling. Unlocking was not block-structured
5059   __ bind(throw_exception);
5060   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
5061   __ should_not_reach_here();
5062 
5063   // call run-time routine
5064   // Rcur: points to monitor entry
5065   __ bind(found);
5066   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
5067   __ unlock_object(Rcur);
5068   __ pop_ptr(Robj);                              // discard object
5069 }
5070 
5071 
5072 //----------------------------------------------------------------------------------------------------
5073 // Wide instructions
5074 
5075 void TemplateTable::wide() {
5076   transition(vtos, vtos);
5077   __ ldrb(R3_bytecode, at_bcp(1));
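  // R3_bytecode now holds the opcode that follows the wide prefix; dispatch goes
  // through Interpreter::_wentry_point (the wide entry points) rather than the
  // normal dispatch table.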
5078 
5079   InlinedAddress Ltable((address)Interpreter::_wentry_point);
5080   __ ldr_literal(Rtemp, Ltable);
5081   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
5082 
5083   __ nop(); // to avoid filling CPU pipeline with invalid instructions
5084   __ nop();
5085   __ bind_literal(Ltable);
5086 }
5087 
5088 
5089 //----------------------------------------------------------------------------------------------------
5090 // Multi arrays
5091 
5092 void TemplateTable::multianewarray() {
5093   transition(vtos, atos);
5094   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
5095 
5096   // last dim is on top of stack; we want address of first one:
5097   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
5098   // the final wordSize is subtracted so the result points to the beginning of the array.
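  // For example (illustrative, assuming stackElementSize == wordSize == 8 as on AArch64),
  // with ndims == 3: first_addr = last_addr + 3*8 - 8 = last_addr + 16, i.e. two stack
  // slots deeper than the last (top-of-stack) dimension.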
5099   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5100   __ sub(R1, Rtemp, wordSize);
5101 
5102   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
5103   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5104   // MacroAssembler::StoreStore useless (included in the runtime exit path)
5105 }