1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  49   // No arm specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 #ifndef AARCH64
  62 static inline Address haddress(int n)            { return iaddress(n + 0); }
  63 #endif // !AARCH64
  64 
  65 static inline Address faddress(int n)            { return iaddress(n); }
  66 static inline Address daddress(int n)            { return laddress(n); }
  67 static inline Address aaddress(int n)            { return iaddress(n); }
  68 
  69 
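     // Computes the base address of the local variable slot at the given index.
     // Locals live at decreasing addresses, so the result is
     // Rlocals - index * Interpreter::stackElementSize.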
  70 void TemplateTable::get_local_base_addr(Register r, Register index) {
  71   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  72 }
  73 
  74 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  75 #ifdef AARCH64
  76   get_local_base_addr(scratch, index);
  77   return Address(scratch);
  78 #else
  79   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  80 #endif // AARCH64
  81 }
  82 
  83 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  84   return load_iaddress(index, scratch);
  85 }
  86 
  87 Address TemplateTable::load_faddress(Register index, Register scratch) {
  88 #ifdef __SOFTFP__
  89   return load_iaddress(index, scratch);
  90 #else
  91   get_local_base_addr(scratch, index);
  92   return Address(scratch);
  93 #endif // __SOFTFP__
  94 }
  95 
  96 Address TemplateTable::load_daddress(Register index, Register scratch) {
  97   get_local_base_addr(scratch, index);
  98   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  99 }
 100 
 101 // At the top of the Java expression stack, which may be different from SP.
 102 // It isn't for category 1 values.
 103 static inline Address at_tos() {
 104   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 105 }
 106 
 107 static inline Address at_tos_p1() {
 108   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 109 }
 110 
 111 static inline Address at_tos_p2() {
 112   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 113 }
 114 
 115 
 116 // 32-bit ARM:
 117 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 118 // separate ldr instructions (supports nonadjacent values).
 119 // Used for longs in all modes, and for doubles in SOFTFP mode.
 120 //
 121 // AArch64: loads long local into R0_tos.
 122 //
 123 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 124   const Register Rlocal_base = tmp;
 125   assert_different_registers(Rlocal_index, tmp);
 126 
 127   get_local_base_addr(Rlocal_base, Rlocal_index);
 128 #ifdef AARCH64
 129   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130 #else
 131   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 132   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 133 #endif // AARCH64
 134 }
 135 
 136 
 137 // 32-bit ARM:
 138 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 139 // separate str instructions (supports nonadjacent values).
 140 // Used for longs in all modes, and for doubles in SOFTFP mode
 141 //
 142 // AArch64: stores R0_tos to long local.
 143 //
 144 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 145   const Register Rlocal_base = tmp;
 146   assert_different_registers(Rlocal_index, tmp);
 147 
 148   get_local_base_addr(Rlocal_base, Rlocal_index);
 149 #ifdef AARCH64
 150   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151 #else
 152   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 153   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 154 #endif // AARCH64
 155 }
 156 
 157 // Returns address of Java array element using temp register as address base.
 158 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 159   int logElemSize = exact_log2(type2aelembytes(elemType));
 160   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 161   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 162 }
 163 
 164 //----------------------------------------------------------------------------------------------------
 165 // Condition conversion
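     // Returns the ARM condition code for the negation of the given bytecode
     // comparison (equal -> ne, less -> ge, etc.).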
 166 AsmCondition convNegCond(TemplateTable::Condition cc) {
 167   switch (cc) {
 168     case TemplateTable::equal        : return ne;
 169     case TemplateTable::not_equal    : return eq;
 170     case TemplateTable::less         : return ge;
 171     case TemplateTable::less_equal   : return gt;
 172     case TemplateTable::greater      : return le;
 173     case TemplateTable::greater_equal: return lt;
 174   }
 175   ShouldNotReachHere();
 176   return nv;
 177 }
 178 
 179 //----------------------------------------------------------------------------------------------------
 180 // Miscellaneous helper routines
 181 
 182 // Store an oop (or NULL) at the address described by obj.
 183 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 184 // Also destroys new_val and obj.base().
 185 static void do_oop_store(InterpreterMacroAssembler* _masm,
 186                          Address obj,
 187                          Register new_val,
 188                          Register tmp1,
 189                          Register tmp2,
 190                          Register tmp3,
 191                          bool is_null,
 192                          DecoratorSet decorators = 0) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, is_null);
 196 }
 197 
 198 static void do_oop_load(InterpreterMacroAssembler* _masm,
 199                         Register dst,
 200                         Address obj,
 201                         DecoratorSet decorators = 0) {
 202   __ load_heap_oop(dst, obj, noreg, noreg, noreg);
 203 }
 204 
 205 Address TemplateTable::at_bcp(int offset) {
 206   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 207   return Address(Rbcp, offset);
 208 }
 209 
 210 
 211 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 212 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 213                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 214                                    int byte_no) {
 215   assert_different_registers(bc_reg, temp_reg);
 216   if (!RewriteBytecodes)  return;
 217   Label L_patch_done;
 218 
 219   switch (bc) {
 220   case Bytecodes::_fast_aputfield:
 221   case Bytecodes::_fast_bputfield:
 222   case Bytecodes::_fast_zputfield:
 223   case Bytecodes::_fast_cputfield:
 224   case Bytecodes::_fast_dputfield:
 225   case Bytecodes::_fast_fputfield:
 226   case Bytecodes::_fast_iputfield:
 227   case Bytecodes::_fast_lputfield:
 228   case Bytecodes::_fast_sputfield:
 229     {
 230       // We skip bytecode quickening for putfield instructions when
 231       // the put_code written to the constant pool cache is zero.
 232       // This is required so that every execution of this instruction
 233       // calls out to InterpreterRuntime::resolve_get_put to do
 234       // additional, required work.
 235       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 236       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 237       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 238       __ mov(bc_reg, bc);
 239       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 240     }
 241     break;
 242   default:
 243     assert(byte_no == -1, "sanity");
 244     // the pair bytecodes have already done the load.
 245     if (load_bc_into_bc_reg) {
 246       __ mov(bc_reg, bc);
 247     }
 248   }
 249 
 250   if (__ can_post_breakpoint()) {
 251     Label L_fast_patch;
 252     // if a breakpoint is present we can't rewrite the stream directly
 253     __ ldrb(temp_reg, at_bcp(0));
 254     __ cmp(temp_reg, Bytecodes::_breakpoint);
 255     __ b(L_fast_patch, ne);
 256     if (bc_reg != R3) {
 257       __ mov(R3, bc_reg);
 258     }
 259     __ mov(R1, Rmethod);
 260     __ mov(R2, Rbcp);
 261     // Let breakpoint table handling rewrite to quicker bytecode
 262     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 263     __ b(L_patch_done);
 264     __ bind(L_fast_patch);
 265   }
 266 
 267 #ifdef ASSERT
 268   Label L_okay;
 269   __ ldrb(temp_reg, at_bcp(0));
 270   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 271   __ b(L_okay, eq);
 272   __ cmp(temp_reg, bc_reg);
 273   __ b(L_okay, eq);
 274   __ stop("patching the wrong bytecode");
 275   __ bind(L_okay);
 276 #endif
 277 
 278   // patch bytecode
 279   __ strb(bc_reg, at_bcp(0));
 280   __ bind(L_patch_done);
 281 }
 282 
 283 //----------------------------------------------------------------------------------------------------
 284 // Individual instructions
 285 
 286 void TemplateTable::nop() {
 287   transition(vtos, vtos);
 288   // nothing to do
 289 }
 290 
 291 void TemplateTable::shouldnotreachhere() {
 292   transition(vtos, vtos);
 293   __ stop("shouldnotreachhere bytecode");
 294 }
 295 
 296 
 297 
 298 void TemplateTable::aconst_null() {
 299   transition(vtos, atos);
 300   __ mov(R0_tos, 0);
 301 }
 302 
 303 
 304 void TemplateTable::iconst(int value) {
 305   transition(vtos, itos);
 306   __ mov_slow(R0_tos, value);
 307 }
 308 
 309 
 310 void TemplateTable::lconst(int value) {
 311   transition(vtos, ltos);
 312   assert((value == 0) || (value == 1), "unexpected long constant");
 313   __ mov(R0_tos, value);
 314 #ifndef AARCH64
 315   __ mov(R1_tos_hi, 0);
 316 #endif // !AARCH64
 317 }
 318 
 319 
 320 void TemplateTable::fconst(int value) {
 321   transition(vtos, ftos);
 322 #ifdef AARCH64
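       // 0x70 and 0x00 are the 8-bit fmov immediate encodings of 1.0f and 2.0f.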
 323   switch(value) {
 324   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 325   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 326   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 327   default:  ShouldNotReachHere();      break;
 328   }
 329 #else
 330   const int zero = 0;         // 0.0f
 331   const int one = 0x3f800000; // 1.0f
 332   const int two = 0x40000000; // 2.0f
 333 
 334   switch(value) {
 335   case 0:   __ mov(R0_tos, zero);   break;
 336   case 1:   __ mov(R0_tos, one);    break;
 337   case 2:   __ mov(R0_tos, two);    break;
 338   default:  ShouldNotReachHere();   break;
 339   }
 340 
 341 #ifndef __SOFTFP__
 342   __ fmsr(S0_tos, R0_tos);
 343 #endif // !__SOFTFP__
 344 #endif // AARCH64
 345 }
 346 
 347 
 348 void TemplateTable::dconst(int value) {
 349   transition(vtos, dtos);
 350 #ifdef AARCH64
 351   switch(value) {
 352   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 353   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 354   default:  ShouldNotReachHere();      break;
 355   }
 356 #else
 357   const int one_lo = 0;            // low part of 1.0
 358   const int one_hi = 0x3ff00000;   // high part of 1.0
 359 
 360   if (value == 0) {
 361 #ifdef __SOFTFP__
 362     __ mov(R0_tos_lo, 0);
 363     __ mov(R1_tos_hi, 0);
 364 #else
 365     __ mov(R0_tmp, 0);
 366     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 367 #endif // __SOFTFP__
 368   } else if (value == 1) {
 369     __ mov(R0_tos_lo, one_lo);
 370     __ mov_slow(R1_tos_hi, one_hi);
 371 #ifndef __SOFTFP__
 372     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 373 #endif // !__SOFTFP__
 374   } else {
 375     ShouldNotReachHere();
 376   }
 377 #endif // AARCH64
 378 }
 379 
 380 
 381 void TemplateTable::bipush() {
 382   transition(vtos, itos);
 383   __ ldrsb(R0_tos, at_bcp(1));
 384 }
 385 
 386 
 387 void TemplateTable::sipush() {
 388   transition(vtos, itos);
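       // Assemble the signed 16-bit operand: the sign-extended high byte at bcp + 1
       // is shifted left by 8 and OR'd with the unsigned low byte at bcp + 2.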
 389   __ ldrsb(R0_tmp, at_bcp(1));
 390   __ ldrb(R1_tmp, at_bcp(2));
 391   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 392 }
 393 
 394 
 395 void TemplateTable::ldc(bool wide) {
 396   transition(vtos, vtos);
 397   Label fastCase, Done;
 398 
 399   const Register Rindex = R1_tmp;
 400   const Register Rcpool = R2_tmp;
 401   const Register Rtags  = R3_tmp;
 402   const Register RtagType = R3_tmp;
 403 
 404   if (wide) {
 405     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 406   } else {
 407     __ ldrb(Rindex, at_bcp(1));
 408   }
 409   __ get_cpool_and_tags(Rcpool, Rtags);
 410 
 411   const int base_offset = ConstantPool::header_size() * wordSize;
 412   const int tags_offset = Array<u1>::base_offset_in_bytes();
 413 
 414   // get const type
 415   __ add(Rtemp, Rtags, tags_offset);
 416 #ifdef AARCH64
 417   __ add(Rtemp, Rtemp, Rindex);
 418   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether a control dependency is enough
 419 #else
 420   __ ldrb(RtagType, Address(Rtemp, Rindex));
 421   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 422 #endif // AARCH64
 423 
 424   // unresolved class - get the resolved class
 425   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 426 
 427   // unresolved class in error (resolution failed) - call into runtime
 428   // so that the same error from first resolution attempt is thrown.
 429 #ifdef AARCH64
 430   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 431   __ cond_cmp(RtagType, Rtemp, ne);
 432 #else
 433   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 434 #endif // AARCH64
 435 
 436   // resolved class - need to call vm to get java mirror of the class
 437   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
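       // After the chained conditional compares, EQ means the tag is one of the
       // three class tags above (slow path); anything else takes the branch to
       // the fast int/float case below.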
 438 
 439   __ b(fastCase, ne);
 440 
 441   // slow case - call runtime
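       // Only class tags reach this point, so InterpreterRuntime::ldc returns an
       // oop (the class mirror), which is pushed as atos.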
 442   __ mov(R1, wide);
 443   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 444   __ push(atos);
 445   __ b(Done);
 446 
 447   // int, float
 448   __ bind(fastCase);
 449 #ifdef ASSERT
 450   { Label L;
 451     __ cmp(RtagType, JVM_CONSTANT_Integer);
 452     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 453     __ b(L, eq);
 454     __ stop("unexpected tag type in ldc");
 455     __ bind(L);
 456   }
 457 #endif // ASSERT
 458   // itos, ftos
 459   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 460   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 461 
 462   // floats and ints are placed on the stack in the same way, so
 463   // we can use push(itos) to transfer the float value without using VFP
 464   __ push(itos);
 465   __ bind(Done);
 466 }
 467 
 468 // Fast path for caching oop constants.
 469 void TemplateTable::fast_aldc(bool wide) {
 470   transition(vtos, atos);
 471   int index_size = wide ? sizeof(u2) : sizeof(u1);
 472   Label resolved;
 473 
 474   // We are resolved if the resolved reference cache entry contains a
 475   // non-null object (CallSite, etc.)
 476   assert_different_registers(R0_tos, R2_tmp);
 477   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 478   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 479   __ cbnz(R0_tos, resolved);
 480 
 481   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 482 
 483   // first time invocation - must resolve first
 484   __ mov(R1, (int)bytecode());
 485   __ call_VM(R0_tos, entry, R1);
 486   __ bind(resolved);
 487 
 488   if (VerifyOops) {
 489     __ verify_oop(R0_tos);
 490   }
 491 }
 492 
 493 void TemplateTable::ldc2_w() {
 494   transition(vtos, vtos);
 495   const Register Rtags  = R2_tmp;
 496   const Register Rindex = R3_tmp;
 497   const Register Rcpool = R4_tmp;
 498   const Register Rbase  = R5_tmp;
 499 
 500   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 501 
 502   __ get_cpool_and_tags(Rcpool, Rtags);
 503   const int base_offset = ConstantPool::header_size() * wordSize;
 504   const int tags_offset = Array<u1>::base_offset_in_bytes();
 505 
 506   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 507 
 508 #ifdef __ABI_HARD__
 509   Label Long, exit;
 510   // get type from tags
 511   __ add(Rtemp, Rtags, tags_offset);
 512   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 513   __ cmp(Rtemp, JVM_CONSTANT_Double);
 514   __ b(Long, ne);
 515   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 516 
 517   __ push(dtos);
 518   __ b(exit);
 519   __ bind(Long);
 520 #endif
 521 
 522 #ifdef AARCH64
 523   __ ldr(R0_tos, Address(Rbase, base_offset));
 524 #else
 525   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 526   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 527 #endif // AARCH64
 528   __ push(ltos);
 529 
 530 #ifdef __ABI_HARD__
 531   __ bind(exit);
 532 #endif
 533 }
 534 
 535 
 536 void TemplateTable::locals_index(Register reg, int offset) {
 537   __ ldrb(reg, at_bcp(offset));
 538 }
 539 
 540 void TemplateTable::iload() {
 541   iload_internal();
 542 }
 543 
 544 void TemplateTable::nofast_iload() {
 545   iload_internal(may_not_rewrite);
 546 }
 547 
 548 void TemplateTable::iload_internal(RewriteControl rc) {
 549   transition(vtos, itos);
 550 
 551   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 552     Label rewrite, done;
 553     const Register next_bytecode = R1_tmp;
 554     const Register target_bytecode = R2_tmp;
 555 
 556     // get next byte
 557     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 558     // If the next bytecode is _iload, wait to rewrite: we only want to rewrite
 559     // the last two iloads in a run. If it is _fast_iload, the following iload has
 560     // already been rewritten, so this iload and the next form a pair and the
 561     // current bytecode is rewritten to _fast_iload2.
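         // Example: for "iload a; iload b; istore c" the second iload is rewritten
         // to _fast_iload on its first execution; when the first iload later runs it
         // sees _fast_iload next and becomes _fast_iload2, loading both locals at once.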
 562     __ cmp(next_bytecode, Bytecodes::_iload);
 563     __ b(done, eq);
 564 
 565     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 566     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 567     __ b(rewrite, eq);
 568 
 569     // if _caload, rewrite to fast_icaload
 570     __ cmp(next_bytecode, Bytecodes::_caload);
 571     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 572     __ b(rewrite, eq);
 573 
 574     // rewrite so iload doesn't check again.
 575     __ mov(target_bytecode, Bytecodes::_fast_iload);
 576 
 577     // rewrite
 578     // R2: fast bytecode
 579     __ bind(rewrite);
 580     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 581     __ bind(done);
 582   }
 583 
 584   // Get the local value into tos
 585   const Register Rlocal_index = R1_tmp;
 586   locals_index(Rlocal_index);
 587   Address local = load_iaddress(Rlocal_index, Rtemp);
 588   __ ldr_s32(R0_tos, local);
 589 }
 590 
 591 
 592 void TemplateTable::fast_iload2() {
 593   transition(vtos, itos);
 594   const Register Rlocal_index = R1_tmp;
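       // A rewritten "iload; iload" pair: the first value is pushed onto the
       // expression stack, the second stays cached in R0_tos; its index byte is
       // read from bcp + 3, past the second iload's opcode.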
 595 
 596   locals_index(Rlocal_index);
 597   Address local = load_iaddress(Rlocal_index, Rtemp);
 598   __ ldr_s32(R0_tos, local);
 599   __ push(itos);
 600 
 601   locals_index(Rlocal_index, 3);
 602   local = load_iaddress(Rlocal_index, Rtemp);
 603   __ ldr_s32(R0_tos, local);
 604 }
 605 
 606 void TemplateTable::fast_iload() {
 607   transition(vtos, itos);
 608   const Register Rlocal_index = R1_tmp;
 609 
 610   locals_index(Rlocal_index);
 611   Address local = load_iaddress(Rlocal_index, Rtemp);
 612   __ ldr_s32(R0_tos, local);
 613 }
 614 
 615 
 616 void TemplateTable::lload() {
 617   transition(vtos, ltos);
 618   const Register Rlocal_index = R2_tmp;
 619 
 620   locals_index(Rlocal_index);
 621   load_category2_local(Rlocal_index, R3_tmp);
 622 }
 623 
 624 
 625 void TemplateTable::fload() {
 626   transition(vtos, ftos);
 627   const Register Rlocal_index = R2_tmp;
 628 
 629   // Get the local value into tos
 630   locals_index(Rlocal_index);
 631   Address local = load_faddress(Rlocal_index, Rtemp);
 632 #ifdef __SOFTFP__
 633   __ ldr(R0_tos, local);
 634 #else
 635   __ ldr_float(S0_tos, local);
 636 #endif // __SOFTFP__
 637 }
 638 
 639 
 640 void TemplateTable::dload() {
 641   transition(vtos, dtos);
 642   const Register Rlocal_index = R2_tmp;
 643 
 644   locals_index(Rlocal_index);
 645 
 646 #ifdef __SOFTFP__
 647   load_category2_local(Rlocal_index, R3_tmp);
 648 #else
 649   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 650 #endif // __SOFTFP__
 651 }
 652 
 653 
 654 void TemplateTable::aload() {
 655   transition(vtos, atos);
 656   const Register Rlocal_index = R1_tmp;
 657 
 658   locals_index(Rlocal_index);
 659   Address local = load_aaddress(Rlocal_index, Rtemp);
 660   __ ldr(R0_tos, local);
 661 }
 662 
 663 
 664 void TemplateTable::locals_index_wide(Register reg) {
 665   assert_different_registers(reg, Rtemp);
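       // The wide index is a 16-bit big-endian operand:
       // (byte at bcp + 2) << 8 | (byte at bcp + 3).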
 666   __ ldrb(Rtemp, at_bcp(2));
 667   __ ldrb(reg, at_bcp(3));
 668   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 669 }
 670 
 671 
 672 void TemplateTable::wide_iload() {
 673   transition(vtos, itos);
 674   const Register Rlocal_index = R2_tmp;
 675 
 676   locals_index_wide(Rlocal_index);
 677   Address local = load_iaddress(Rlocal_index, Rtemp);
 678   __ ldr_s32(R0_tos, local);
 679 }
 680 
 681 
 682 void TemplateTable::wide_lload() {
 683   transition(vtos, ltos);
 684   const Register Rlocal_index = R2_tmp;
 685   const Register Rlocal_base = R3_tmp;
 686 
 687   locals_index_wide(Rlocal_index);
 688   load_category2_local(Rlocal_index, R3_tmp);
 689 }
 690 
 691 
 692 void TemplateTable::wide_fload() {
 693   transition(vtos, ftos);
 694   const Register Rlocal_index = R2_tmp;
 695 
 696   locals_index_wide(Rlocal_index);
 697   Address local = load_faddress(Rlocal_index, Rtemp);
 698 #ifdef __SOFTFP__
 699   __ ldr(R0_tos, local);
 700 #else
 701   __ ldr_float(S0_tos, local);
 702 #endif // __SOFTFP__
 703 }
 704 
 705 
 706 void TemplateTable::wide_dload() {
 707   transition(vtos, dtos);
 708   const Register Rlocal_index = R2_tmp;
 709 
 710   locals_index_wide(Rlocal_index);
 711 #ifdef __SOFTFP__
 712   load_category2_local(Rlocal_index, R3_tmp);
 713 #else
 714   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 715 #endif // __SOFTFP__
 716 }
 717 
 718 
 719 void TemplateTable::wide_aload() {
 720   transition(vtos, atos);
 721   const Register Rlocal_index = R2_tmp;
 722 
 723   locals_index_wide(Rlocal_index);
 724   Address local = load_aaddress(Rlocal_index, Rtemp);
 725   __ ldr(R0_tos, local);
 726 }
 727 
 728 void TemplateTable::index_check(Register array, Register index) {
 729   // Pop ptr into array
 730   __ pop_ptr(array);
 731   index_check_without_pop(array, index);
 732 }
 733 
 734 void TemplateTable::index_check_without_pop(Register array, Register index) {
 735   assert_different_registers(array, index, Rtemp);
 736   // check array
 737   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 738   // check index
 739   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 740   __ cmp_32(index, Rtemp);
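       // The unsigned 'hs' condition below also catches negative indices:
       // reinterpreted as unsigned, they compare higher than any valid length.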
 741   if (index != R4_ArrayIndexOutOfBounds_index) {
 742     // convention with generate_ArrayIndexOutOfBounds_handler()
 743     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 744   }
 745   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 746 }
 747 
 748 
 749 void TemplateTable::iaload() {
 750   transition(itos, itos);
 751   const Register Rarray = R1_tmp;
 752   const Register Rindex = R0_tos;
 753 
 754   index_check(Rarray, Rindex);
 755   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 756 }
 757 
 758 
 759 void TemplateTable::laload() {
 760   transition(itos, ltos);
 761   const Register Rarray = R1_tmp;
 762   const Register Rindex = R0_tos;
 763 
 764   index_check(Rarray, Rindex);
 765 
 766 #ifdef AARCH64
 767   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 768 #else
 769   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 770   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 771   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 772 #endif // AARCH64
 773 }
 774 
 775 
 776 void TemplateTable::faload() {
 777   transition(itos, ftos);
 778   const Register Rarray = R1_tmp;
 779   const Register Rindex = R0_tos;
 780 
 781   index_check(Rarray, Rindex);
 782 
 783   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 784 #ifdef __SOFTFP__
 785   __ ldr(R0_tos, addr);
 786 #else
 787   __ ldr_float(S0_tos, addr);
 788 #endif // __SOFTFP__
 789 }
 790 
 791 
 792 void TemplateTable::daload() {
 793   transition(itos, dtos);
 794   const Register Rarray = R1_tmp;
 795   const Register Rindex = R0_tos;
 796 
 797   index_check(Rarray, Rindex);
 798 
 799 #ifdef __SOFTFP__
 800   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 801   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 802   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 803 #else
 804   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 805 #endif // __SOFTFP__
 806 }
 807 
 808 
 809 void TemplateTable::aaload() {
 810   transition(itos, atos);
 811   const Register Rarray = R1_tmp;
 812   const Register Rindex = R0_tos;
 813 
 814   index_check(Rarray, Rindex);
 815   do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
 816 }
 817 
 818 
 819 void TemplateTable::baload() {
 820   transition(itos, itos);
 821   const Register Rarray = R1_tmp;
 822   const Register Rindex = R0_tos;
 823 
 824   index_check(Rarray, Rindex);
 825   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 826 }
 827 
 828 
 829 void TemplateTable::caload() {
 830   transition(itos, itos);
 831   const Register Rarray = R1_tmp;
 832   const Register Rindex = R0_tos;
 833 
 834   index_check(Rarray, Rindex);
 835   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 836 }
 837 
 838 
 839 // iload followed by caload frequent pair
 840 void TemplateTable::fast_icaload() {
 841   transition(vtos, itos);
 842   const Register Rlocal_index = R1_tmp;
 843   const Register Rarray = R1_tmp;
 844   const Register Rindex = R4_tmp; // index_check prefers index in R4
 845   assert_different_registers(Rlocal_index, Rindex);
 846   assert_different_registers(Rarray, Rindex);
 847 
 848   // load index out of locals
 849   locals_index(Rlocal_index);
 850   Address local = load_iaddress(Rlocal_index, Rtemp);
 851   __ ldr_s32(Rindex, local);
 852 
 853   // get array element
 854   index_check(Rarray, Rindex);
 855   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 856 }
 857 
 858 
 859 void TemplateTable::saload() {
 860   transition(itos, itos);
 861   const Register Rarray = R1_tmp;
 862   const Register Rindex = R0_tos;
 863 
 864   index_check(Rarray, Rindex);
 865   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 866 }
 867 
 868 
 869 void TemplateTable::iload(int n) {
 870   transition(vtos, itos);
 871   __ ldr_s32(R0_tos, iaddress(n));
 872 }
 873 
 874 
 875 void TemplateTable::lload(int n) {
 876   transition(vtos, ltos);
 877 #ifdef AARCH64
 878   __ ldr(R0_tos, laddress(n));
 879 #else
 880   __ ldr(R0_tos_lo, laddress(n));
 881   __ ldr(R1_tos_hi, haddress(n));
 882 #endif // AARCH64
 883 }
 884 
 885 
 886 void TemplateTable::fload(int n) {
 887   transition(vtos, ftos);
 888 #ifdef __SOFTFP__
 889   __ ldr(R0_tos, faddress(n));
 890 #else
 891   __ ldr_float(S0_tos, faddress(n));
 892 #endif // __SOFTFP__
 893 }
 894 
 895 
 896 void TemplateTable::dload(int n) {
 897   transition(vtos, dtos);
 898 #ifdef __SOFTFP__
 899   __ ldr(R0_tos_lo, laddress(n));
 900   __ ldr(R1_tos_hi, haddress(n));
 901 #else
 902   __ ldr_double(D0_tos, daddress(n));
 903 #endif // __SOFTFP__
 904 }
 905 
 906 
 907 void TemplateTable::aload(int n) {
 908   transition(vtos, atos);
 909   __ ldr(R0_tos, aaddress(n));
 910 }
 911 
 912 void TemplateTable::aload_0() {
 913   aload_0_internal();
 914 }
 915 
 916 void TemplateTable::nofast_aload_0() {
 917   aload_0_internal(may_not_rewrite);
 918 }
 919 
 920 void TemplateTable::aload_0_internal(RewriteControl rc) {
 921   transition(vtos, atos);
 922   // According to bytecode histograms, the pairs:
 923   //
 924   // _aload_0, _fast_igetfield
 925   // _aload_0, _fast_agetfield
 926   // _aload_0, _fast_fgetfield
 927   //
 928   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 929   // bytecode checks if the next bytecode is either _fast_igetfield,
 930   // _fast_agetfield or _fast_fgetfield and then rewrites the
 931   // current bytecode into a pair bytecode; otherwise it rewrites the current
 932   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 933   //
 934   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 935   //       otherwise we may miss an opportunity for a pair.
 936   //
 937   // Also rewrite frequent pairs
 938   //   aload_0, aload_1
 939   //   aload_0, iload_1
 940   // These bytecodes, which need only a small amount of code, are the most profitable to rewrite.
 941   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 942     Label rewrite, done;
 943     const Register next_bytecode = R1_tmp;
 944     const Register target_bytecode = R2_tmp;
 945 
 946     // get next byte
 947     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 948 
 949     // if _getfield then wait with rewrite
 950     __ cmp(next_bytecode, Bytecodes::_getfield);
 951     __ b(done, eq);
 952 
 953     // if _igetfield then rewrite to _fast_iaccess_0
 954     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 955     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
 956     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
 957     __ b(rewrite, eq);
 958 
 959     // if _agetfield then rewrite to _fast_aaccess_0
 960     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 961     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
 962     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
 963     __ b(rewrite, eq);
 964 
 965     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
 966     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 967     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 968 
 969     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
 970 #ifdef AARCH64
 971     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
 972     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
 973     __ mov(target_bytecode, Rtemp, eq);
 974 #else
 975     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
 976     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
 977 #endif // AARCH64
 978 
 979     // rewrite
 980     __ bind(rewrite);
 981     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
 982 
 983     __ bind(done);
 984   }
 985 
 986   aload(0);
 987 }
 988 
 989 void TemplateTable::istore() {
 990   transition(itos, vtos);
 991   const Register Rlocal_index = R2_tmp;
 992 
 993   locals_index(Rlocal_index);
 994   Address local = load_iaddress(Rlocal_index, Rtemp);
 995   __ str_32(R0_tos, local);
 996 }
 997 
 998 
 999 void TemplateTable::lstore() {
1000   transition(ltos, vtos);
1001   const Register Rlocal_index = R2_tmp;
1002 
1003   locals_index(Rlocal_index);
1004   store_category2_local(Rlocal_index, R3_tmp);
1005 }
1006 
1007 
1008 void TemplateTable::fstore() {
1009   transition(ftos, vtos);
1010   const Register Rlocal_index = R2_tmp;
1011 
1012   locals_index(Rlocal_index);
1013   Address local = load_faddress(Rlocal_index, Rtemp);
1014 #ifdef __SOFTFP__
1015   __ str(R0_tos, local);
1016 #else
1017   __ str_float(S0_tos, local);
1018 #endif // __SOFTFP__
1019 }
1020 
1021 
1022 void TemplateTable::dstore() {
1023   transition(dtos, vtos);
1024   const Register Rlocal_index = R2_tmp;
1025 
1026   locals_index(Rlocal_index);
1027 
1028 #ifdef __SOFTFP__
1029   store_category2_local(Rlocal_index, R3_tmp);
1030 #else
1031   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1032 #endif // __SOFTFP__
1033 }
1034 
1035 
1036 void TemplateTable::astore() {
1037   transition(vtos, vtos);
1038   const Register Rlocal_index = R1_tmp;
1039 
1040   __ pop_ptr(R0_tos);
1041   locals_index(Rlocal_index);
1042   Address local = load_aaddress(Rlocal_index, Rtemp);
1043   __ str(R0_tos, local);
1044 }
1045 
1046 
1047 void TemplateTable::wide_istore() {
1048   transition(vtos, vtos);
1049   const Register Rlocal_index = R2_tmp;
1050 
1051   __ pop_i(R0_tos);
1052   locals_index_wide(Rlocal_index);
1053   Address local = load_iaddress(Rlocal_index, Rtemp);
1054   __ str_32(R0_tos, local);
1055 }
1056 
1057 
1058 void TemplateTable::wide_lstore() {
1059   transition(vtos, vtos);
1060   const Register Rlocal_index = R2_tmp;
1061   const Register Rlocal_base = R3_tmp;
1062 
1063 #ifdef AARCH64
1064   __ pop_l(R0_tos);
1065 #else
1066   __ pop_l(R0_tos_lo, R1_tos_hi);
1067 #endif // AARCH64
1068 
1069   locals_index_wide(Rlocal_index);
1070   store_category2_local(Rlocal_index, R3_tmp);
1071 }
1072 
1073 
1074 void TemplateTable::wide_fstore() {
1075   wide_istore();
1076 }
1077 
1078 
1079 void TemplateTable::wide_dstore() {
1080   wide_lstore();
1081 }
1082 
1083 
1084 void TemplateTable::wide_astore() {
1085   transition(vtos, vtos);
1086   const Register Rlocal_index = R2_tmp;
1087 
1088   __ pop_ptr(R0_tos);
1089   locals_index_wide(Rlocal_index);
1090   Address local = load_aaddress(Rlocal_index, Rtemp);
1091   __ str(R0_tos, local);
1092 }
1093 
1094 
1095 void TemplateTable::iastore() {
1096   transition(itos, vtos);
1097   const Register Rindex = R4_tmp; // index_check prefers index in R4
1098   const Register Rarray = R3_tmp;
1099   // R0_tos: value
1100 
1101   __ pop_i(Rindex);
1102   index_check(Rarray, Rindex);
1103   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1104 }
1105 
1106 
1107 void TemplateTable::lastore() {
1108   transition(ltos, vtos);
1109   const Register Rindex = R4_tmp; // index_check prefers index in R4
1110   const Register Rarray = R3_tmp;
1111   // R0_tos_lo:R1_tos_hi: value
1112 
1113   __ pop_i(Rindex);
1114   index_check(Rarray, Rindex);
1115 
1116 #ifdef AARCH64
1117   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1118 #else
1119   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1120   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1121   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1122 #endif // AARCH64
1123 }
1124 
1125 
1126 void TemplateTable::fastore() {
1127   transition(ftos, vtos);
1128   const Register Rindex = R4_tmp; // index_check prefers index in R4
1129   const Register Rarray = R3_tmp;
1130   // S0_tos/R0_tos: value
1131 
1132   __ pop_i(Rindex);
1133   index_check(Rarray, Rindex);
1134   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1135 
1136 #ifdef __SOFTFP__
1137   __ str(R0_tos, addr);
1138 #else
1139   __ str_float(S0_tos, addr);
1140 #endif // __SOFTFP__
1141 }
1142 
1143 
1144 void TemplateTable::dastore() {
1145   transition(dtos, vtos);
1146   const Register Rindex = R4_tmp; // index_check prefers index in R4
1147   const Register Rarray = R3_tmp;
1148   // D0_tos / R0_tos_lo:R1_tos_hi: value
1149 
1150   __ pop_i(Rindex);
1151   index_check(Rarray, Rindex);
1152 
1153 #ifdef __SOFTFP__
1154   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1155   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1156   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1157 #else
1158   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1159 #endif // __SOFTFP__
1160 }
1161 
1162 
1163 void TemplateTable::aastore() {
1164   transition(vtos, vtos);
1165   Label is_null, throw_array_store, done;
1166 
1167   const Register Raddr_1   = R1_tmp;
1168   const Register Rvalue_2  = R2_tmp;
1169   const Register Rarray_3  = R3_tmp;
1170   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1171   const Register Rsub_5    = R5_tmp;
1172   const Register Rsuper_LR = LR_tmp;
1173 
1174   // stack: ..., array, index, value
1175   __ ldr(Rvalue_2, at_tos());     // Value
1176   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1177   __ ldr(Rarray_3, at_tos_p2());  // Array
1178 
1179   index_check_without_pop(Rarray_3, Rindex_4);
1180 
1181   // Compute the array base
1182   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
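       // Raddr_1 now points at element 0; the element index is applied later
       // (scaled add on the store path, indexed address on the null path).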
1183 
1184   // do array store check - check for NULL value first
1185   __ cbz(Rvalue_2, is_null);
1186 
1187   // Load subklass
1188   __ load_klass(Rsub_5, Rvalue_2);
1189   // Load superklass
1190   __ load_klass(Rtemp, Rarray_3);
1191   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1192 
1193   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1194   // Come here on success
1195 
1196   // Store value
1197   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1198 
1199   // Now store using the appropriate barrier
1200   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
1201   __ b(done);
1202 
1203   __ bind(throw_array_store);
1204 
1205   // Come here on failure of subtype check
1206   __ profile_typecheck_failed(R0_tmp);
1207 
1208   // object is at TOS
1209   __ b(Interpreter::_throw_ArrayStoreException_entry);
1210 
1211   // Have a NULL in Rvalue_2, store NULL at array[index].
1212   __ bind(is_null);
1213   __ profile_null_seen(R0_tmp);
1214 
1215   // Store a NULL
1216   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
1217 
1218   // Pop stack arguments
1219   __ bind(done);
1220   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1221 }
1222 
1223 
1224 void TemplateTable::bastore() {
1225   transition(itos, vtos);
1226   const Register Rindex = R4_tmp; // index_check prefers index in R4
1227   const Register Rarray = R3_tmp;
1228   // R0_tos: value
1229 
1230   __ pop_i(Rindex);
1231   index_check(Rarray, Rindex);
1232 
1233   // Need to check whether array is boolean or byte
1234   // since both types share the bastore bytecode.
1235   __ load_klass(Rtemp, Rarray);
1236   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
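       // layout_helper_boolean_diffbit() distinguishes the T_BOOLEAN and T_BYTE
       // array layout helpers: if it tests non-zero this is a boolean array and
       // the value is masked to 0/1 below.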
1237   Label L_skip;
1238   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1239   __ b(L_skip, eq);
1240   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1241   __ bind(L_skip);
1242   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1243 }
1244 
1245 
1246 void TemplateTable::castore() {
1247   transition(itos, vtos);
1248   const Register Rindex = R4_tmp; // index_check prefers index in R4
1249   const Register Rarray = R3_tmp;
1250   // R0_tos: value
1251 
1252   __ pop_i(Rindex);
1253   index_check(Rarray, Rindex);
1254 
1255   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1256 }
1257 
1258 
1259 void TemplateTable::sastore() {
1260   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1261            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1262          "base offsets for char and short should be equal");
1263   castore();
1264 }
1265 
1266 
1267 void TemplateTable::istore(int n) {
1268   transition(itos, vtos);
1269   __ str_32(R0_tos, iaddress(n));
1270 }
1271 
1272 
1273 void TemplateTable::lstore(int n) {
1274   transition(ltos, vtos);
1275 #ifdef AARCH64
1276   __ str(R0_tos, laddress(n));
1277 #else
1278   __ str(R0_tos_lo, laddress(n));
1279   __ str(R1_tos_hi, haddress(n));
1280 #endif // AARCH64
1281 }
1282 
1283 
1284 void TemplateTable::fstore(int n) {
1285   transition(ftos, vtos);
1286 #ifdef __SOFTFP__
1287   __ str(R0_tos, faddress(n));
1288 #else
1289   __ str_float(S0_tos, faddress(n));
1290 #endif // __SOFTFP__
1291 }
1292 
1293 
1294 void TemplateTable::dstore(int n) {
1295   transition(dtos, vtos);
1296 #ifdef __SOFTFP__
1297   __ str(R0_tos_lo, laddress(n));
1298   __ str(R1_tos_hi, haddress(n));
1299 #else
1300   __ str_double(D0_tos, daddress(n));
1301 #endif // __SOFTFP__
1302 }
1303 
1304 
1305 void TemplateTable::astore(int n) {
1306   transition(vtos, vtos);
1307   __ pop_ptr(R0_tos);
1308   __ str(R0_tos, aaddress(n));
1309 }
1310 
1311 
1312 void TemplateTable::pop() {
1313   transition(vtos, vtos);
1314   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1315 }
1316 
1317 
1318 void TemplateTable::pop2() {
1319   transition(vtos, vtos);
1320   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1321 }
1322 
1323 
1324 void TemplateTable::dup() {
1325   transition(vtos, vtos);
1326   // stack: ..., a
1327   __ load_ptr(0, R0_tmp);
1328   __ push_ptr(R0_tmp);
1329   // stack: ..., a, a
1330 }
1331 
1332 
1333 void TemplateTable::dup_x1() {
1334   transition(vtos, vtos);
1335   // stack: ..., a, b
1336   __ load_ptr(0, R0_tmp);  // load b
1337   __ load_ptr(1, R2_tmp);  // load a
1338   __ store_ptr(1, R0_tmp); // store b
1339   __ store_ptr(0, R2_tmp); // store a
1340   __ push_ptr(R0_tmp);     // push b
1341   // stack: ..., b, a, b
1342 }
1343 
1344 
1345 void TemplateTable::dup_x2() {
1346   transition(vtos, vtos);
1347   // stack: ..., a, b, c
1348   __ load_ptr(0, R0_tmp);   // load c
1349   __ load_ptr(1, R2_tmp);   // load b
1350   __ load_ptr(2, R4_tmp);   // load a
1351 
1352   __ push_ptr(R0_tmp);      // push c
1353 
1354   // stack: ..., a, b, c, c
1355   __ store_ptr(1, R2_tmp);  // store b
1356   __ store_ptr(2, R4_tmp);  // store a
1357   __ store_ptr(3, R0_tmp);  // store c
1358   // stack: ..., c, a, b, c
1359 }
1360 
1361 
1362 void TemplateTable::dup2() {
1363   transition(vtos, vtos);
1364   // stack: ..., a, b
1365   __ load_ptr(1, R0_tmp);  // load a
1366   __ push_ptr(R0_tmp);     // push a
1367   __ load_ptr(1, R0_tmp);  // load b
1368   __ push_ptr(R0_tmp);     // push b
1369   // stack: ..., a, b, a, b
1370 }
1371 
1372 
1373 void TemplateTable::dup2_x1() {
1374   transition(vtos, vtos);
1375 
1376   // stack: ..., a, b, c
1377   __ load_ptr(0, R4_tmp);  // load c
1378   __ load_ptr(1, R2_tmp);  // load b
1379   __ load_ptr(2, R0_tmp);  // load a
1380 
1381   __ push_ptr(R2_tmp);     // push b
1382   __ push_ptr(R4_tmp);     // push c
1383 
1384   // stack: ..., a, b, c, b, c
1385 
1386   __ store_ptr(2, R0_tmp);  // store a
1387   __ store_ptr(3, R4_tmp);  // store c
1388   __ store_ptr(4, R2_tmp);  // store b
1389 
1390   // stack: ..., b, c, a, b, c
1391 }
1392 
1393 
1394 void TemplateTable::dup2_x2() {
1395   transition(vtos, vtos);
1396   // stack: ..., a, b, c, d
1397   __ load_ptr(0, R0_tmp);  // load d
1398   __ load_ptr(1, R2_tmp);  // load c
1399   __ push_ptr(R2_tmp);     // push c
1400   __ push_ptr(R0_tmp);     // push d
1401   // stack: ..., a, b, c, d, c, d
1402   __ load_ptr(4, R4_tmp);  // load b
1403   __ store_ptr(4, R0_tmp); // store d in b
1404   __ store_ptr(2, R4_tmp); // store b in d
1405   // stack: ..., a, d, c, b, c, d
1406   __ load_ptr(5, R4_tmp);  // load a
1407   __ store_ptr(5, R2_tmp); // store c in a
1408   __ store_ptr(3, R4_tmp); // store a in c
1409   // stack: ..., c, d, a, b, c, d
1410 }
1411 
1412 
1413 void TemplateTable::swap() {
1414   transition(vtos, vtos);
1415   // stack: ..., a, b
1416   __ load_ptr(1, R0_tmp);  // load a
1417   __ load_ptr(0, R2_tmp);  // load b
1418   __ store_ptr(0, R0_tmp); // store a in b
1419   __ store_ptr(1, R2_tmp); // store b in a
1420   // stack: ..., b, a
1421 }
1422 
1423 
1424 void TemplateTable::iop2(Operation op) {
1425   transition(itos, itos);
1426   const Register arg1 = R1_tmp;
1427   const Register arg2 = R0_tos;
1428 
1429   __ pop_i(arg1);
1430   switch (op) {
1431     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1432     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1433     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1434     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1435     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1436     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1437 #ifdef AARCH64
1438     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1439     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1440     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1441 #else
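         // Per the JVM spec, int shifts use only the low 5 bits of the count,
         // hence the 0x1f mask before shifting.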
1442     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1443     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1444     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1445 #endif // AARCH64
1446     default   : ShouldNotReachHere();
1447   }
1448 }
1449 
1450 
1451 void TemplateTable::lop2(Operation op) {
1452   transition(ltos, ltos);
1453 #ifdef AARCH64
1454   const Register arg1 = R1_tmp;
1455   const Register arg2 = R0_tos;
1456 
1457   __ pop_l(arg1);
1458   switch (op) {
1459     case add  : __ add (R0_tos, arg1, arg2); break;
1460     case sub  : __ sub (R0_tos, arg1, arg2); break;
1461     case _and : __ andr(R0_tos, arg1, arg2); break;
1462     case _or  : __ orr (R0_tos, arg1, arg2); break;
1463     case _xor : __ eor (R0_tos, arg1, arg2); break;
1464     default   : ShouldNotReachHere();
1465   }
1466 #else
1467   const Register arg1_lo = R2_tmp;
1468   const Register arg1_hi = R3_tmp;
1469   const Register arg2_lo = R0_tos_lo;
1470   const Register arg2_hi = R1_tos_hi;
1471 
1472   __ pop_l(arg1_lo, arg1_hi);
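       // 64-bit add/sub are built from 32-bit halves: low words first, with the
       // carry/borrow propagated into the high words via adc/sbc.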
1473   switch (op) {
1474     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1475     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1476     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1477     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1478     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1479     default : ShouldNotReachHere();
1480   }
1481 #endif // AARCH64
1482 }
1483 
1484 
1485 void TemplateTable::idiv() {
1486   transition(itos, itos);
1487 #ifdef AARCH64
1488   const Register divisor = R0_tos;
1489   const Register dividend = R1_tmp;
1490 
1491   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1492   __ pop_i(dividend);
1493   __ sdiv_w(R0_tos, dividend, divisor);
1494 #else
1495   __ mov(R2, R0_tos);
1496   __ pop_i(R0);
1497   // R0 - dividend
1498   // R2 - divisor
1499   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1500   // R1 - result
1501   __ mov(R0_tos, R1);
1502 #endif // AARCH64
1503 }
1504 
1505 
1506 void TemplateTable::irem() {
1507   transition(itos, itos);
1508 #ifdef AARCH64
1509   const Register divisor = R0_tos;
1510   const Register dividend = R1_tmp;
1511   const Register quotient = R2_tmp;
1512 
1513   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1514   __ pop_i(dividend);
1515   __ sdiv_w(quotient, dividend, divisor);
1516   __ msub_w(R0_tos, divisor, quotient, dividend);
1517 #else
1518   __ mov(R2, R0_tos);
1519   __ pop_i(R0);
1520   // R0 - dividend
1521   // R2 - divisor
1522   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1523   // R0 - remainder
1524 #endif // AARCH64
1525 }
1526 
1527 
1528 void TemplateTable::lmul() {
1529   transition(ltos, ltos);
1530 #ifdef AARCH64
1531   const Register arg1 = R0_tos;
1532   const Register arg2 = R1_tmp;
1533 
1534   __ pop_l(arg2);
1535   __ mul(R0_tos, arg1, arg2);
1536 #else
1537   const Register arg1_lo = R0_tos_lo;
1538   const Register arg1_hi = R1_tos_hi;
1539   const Register arg2_lo = R2_tmp;
1540   const Register arg2_hi = R3_tmp;
1541 
1542   __ pop_l(arg2_lo, arg2_hi);
1543 
1544   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1545 #endif // AARCH64
1546 }
1547 
1548 
1549 void TemplateTable::ldiv() {
1550   transition(ltos, ltos);
1551 #ifdef AARCH64
1552   const Register divisor = R0_tos;
1553   const Register dividend = R1_tmp;
1554 
1555   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1556   __ pop_l(dividend);
1557   __ sdiv(R0_tos, dividend, divisor);
1558 #else
1559   const Register x_lo = R2_tmp;
1560   const Register x_hi = R3_tmp;
1561   const Register y_lo = R0_tos_lo;
1562   const Register y_hi = R1_tos_hi;
1563 
1564   __ pop_l(x_lo, x_hi);
1565 
1566   // check if y = 0
1567   __ orrs(Rtemp, y_lo, y_hi);
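       // orrs sets Z only if both halves of the divisor are zero; the conditional
       // call below then throws ArithmeticException for division by zero.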
1568   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1569   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1570 #endif // AARCH64
1571 }
1572 
1573 
1574 void TemplateTable::lrem() {
1575   transition(ltos, ltos);
1576 #ifdef AARCH64
1577   const Register divisor = R0_tos;
1578   const Register dividend = R1_tmp;
1579   const Register quotient = R2_tmp;
1580 
1581   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1582   __ pop_l(dividend);
1583   __ sdiv(quotient, dividend, divisor);
1584   __ msub(R0_tos, divisor, quotient, dividend);
1585 #else
1586   const Register x_lo = R2_tmp;
1587   const Register x_hi = R3_tmp;
1588   const Register y_lo = R0_tos_lo;
1589   const Register y_hi = R1_tos_hi;
1590 
1591   __ pop_l(x_lo, x_hi);
1592 
1593   // check if y = 0
1594   __ orrs(Rtemp, y_lo, y_hi);
1595   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1596   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1597 #endif // AARCH64
1598 }
1599 
1600 
1601 void TemplateTable::lshl() {
1602   transition(itos, ltos);
1603 #ifdef AARCH64
1604   const Register val = R1_tmp;
1605   const Register shift_cnt = R0_tos;
1606   __ pop_l(val);
1607   __ lslv(R0_tos, val, shift_cnt);
1608 #else
1609   const Register shift_cnt = R4_tmp;
1610   const Register val_lo = R2_tmp;
1611   const Register val_hi = R3_tmp;
1612 
1613   __ pop_l(val_lo, val_hi);
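       // Per the JVM spec, long shifts use only the low 6 bits of the count,
       // hence the '& 63' mask (same in lshr and lushr below).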
1614   __ andr(shift_cnt, R0_tos, 63);
1615   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1616 #endif // AARCH64
1617 }
1618 
1619 
1620 void TemplateTable::lshr() {
1621   transition(itos, ltos);
1622 #ifdef AARCH64
1623   const Register val = R1_tmp;
1624   const Register shift_cnt = R0_tos;
1625   __ pop_l(val);
1626   __ asrv(R0_tos, val, shift_cnt);
1627 #else
1628   const Register shift_cnt = R4_tmp;
1629   const Register val_lo = R2_tmp;
1630   const Register val_hi = R3_tmp;
1631 
1632   __ pop_l(val_lo, val_hi);
1633   __ andr(shift_cnt, R0_tos, 63);
1634   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1635 #endif // AARCH64
1636 }
1637 
1638 
1639 void TemplateTable::lushr() {
1640   transition(itos, ltos);
1641 #ifdef AARCH64
1642   const Register val = R1_tmp;
1643   const Register shift_cnt = R0_tos;
1644   __ pop_l(val);
1645   __ lsrv(R0_tos, val, shift_cnt);
1646 #else
1647   const Register shift_cnt = R4_tmp;
1648   const Register val_lo = R2_tmp;
1649   const Register val_hi = R3_tmp;
1650 
1651   __ pop_l(val_lo, val_hi);
1652   __ andr(shift_cnt, R0_tos, 63);
1653   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1654 #endif // AARCH64
1655 }
1656 
1657 
1658 void TemplateTable::fop2(Operation op) {
1659   transition(ftos, ftos);
1660 #ifdef __SOFTFP__
1661   __ mov(R1, R0_tos);
1662   __ pop_i(R0);
1663   switch (op) {
1664     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1665     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1666     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1667     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1668     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1669     default : ShouldNotReachHere();
1670   }
1671 #else
1672   const FloatRegister arg1 = S1_tmp;
1673   const FloatRegister arg2 = S0_tos;
1674 
1675   switch (op) {
1676     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1677     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1678     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1679     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1680     case rem:
1681 #ifndef __ABI_HARD__
1682       __ pop_f(arg1);
1683       __ fmrs(R0, arg1);
1684       __ fmrs(R1, arg2);
1685       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1686       __ fmsr(S0_tos, R0);
1687 #else
1688       __ mov_float(S1_reg, arg2);
1689       __ pop_f(S0);
1690       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1691 #endif // !__ABI_HARD__
1692       break;
1693     default : ShouldNotReachHere();
1694   }
1695 #endif // __SOFTFP__
1696 }
1697 
1698 
1699 void TemplateTable::dop2(Operation op) {
1700   transition(dtos, dtos);
1701 #ifdef __SOFTFP__
1702   __ mov(R2, R0_tos_lo);
1703   __ mov(R3, R1_tos_hi);
1704   __ pop_l(R0, R1);
1705   switch (op) {
1706     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1707     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1708     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1709     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1710     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1711     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1712     default : ShouldNotReachHere();
1713   }
1714 #else
1715   const FloatRegister arg1 = D1_tmp;
1716   const FloatRegister arg2 = D0_tos;
1717 
1718   switch (op) {
1719     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1720     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1721     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1722     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1723     case rem:
1724 #ifndef __ABI_HARD__
1725       __ pop_d(arg1);
1726       __ fmrrd(R0, R1, arg1);
1727       __ fmrrd(R2, R3, arg2);
1728       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1729       __ fmdrr(D0_tos, R0, R1);
1730 #else
1731       __ mov_double(D1, arg2);
1732       __ pop_d(D0);
1733       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1734 #endif // !__ABI_HARD__
1735       break;
1736     default : ShouldNotReachHere();
1737   }
1738 #endif // __SOFTFP__
1739 }
1740 
1741 
1742 void TemplateTable::ineg() {
1743   transition(itos, itos);
1744   __ neg_32(R0_tos, R0_tos);
1745 }
1746 
1747 
1748 void TemplateTable::lneg() {
1749   transition(ltos, ltos);
1750 #ifdef AARCH64
1751   __ neg(R0_tos, R0_tos);
1752 #else
1753   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1754   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1755 #endif // AARCH64
1756 }
1757 
1758 
1759 void TemplateTable::fneg() {
1760   transition(ftos, ftos);
1761 #ifdef __SOFTFP__
1762   // Invert sign bit
1763   const int sign_mask = 0x80000000;
1764   __ eor(R0_tos, R0_tos, sign_mask);
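       // Illustrative: this flips only bit 31 of the IEEE-754 single-precision encoding,
       // e.g. 1.0f (0x3F800000) ^ 0x80000000 == 0xBF800000 (-1.0f).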
1765 #else
1766   __ neg_float(S0_tos, S0_tos);
1767 #endif // __SOFTFP__
1768 }
1769 
1770 
1771 void TemplateTable::dneg() {
1772   transition(dtos, dtos);
1773 #ifdef __SOFTFP__
1774   // Invert sign bit in the high part of the double
1775   const int sign_mask_hi = 0x80000000;
1776   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1777 #else
1778   __ neg_double(D0_tos, D0_tos);
1779 #endif // __SOFTFP__
1780 }
1781 
1782 
1783 void TemplateTable::iinc() {
1784   transition(vtos, vtos);
1785   const Register Rconst = R2_tmp;
1786   const Register Rlocal_index = R1_tmp;
1787   const Register Rval = R0_tmp;
1788 
1789   __ ldrsb(Rconst, at_bcp(2));
1790   locals_index(Rlocal_index);
1791   Address local = load_iaddress(Rlocal_index, Rtemp);
1792   __ ldr_s32(Rval, local);
1793   __ add(Rval, Rval, Rconst);
1794   __ str_32(Rval, local);
1795 }
1796 
1797 
1798 void TemplateTable::wide_iinc() {
1799   transition(vtos, vtos);
1800   const Register Rconst = R2_tmp;
1801   const Register Rlocal_index = R1_tmp;
1802   const Register Rval = R0_tmp;
1803 
1804   // get constant in Rconst
1805   __ ldrsb(R2_tmp, at_bcp(4));
1806   __ ldrb(R3_tmp, at_bcp(5));
1807   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
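       // Illustrative: the increment is a signed 16-bit big-endian operand, so for constant
       // bytes 0xFF 0xFE the value assembled above is (-1 << 8) | 0xFE == -2.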
1808 
1809   locals_index_wide(Rlocal_index);
1810   Address local = load_iaddress(Rlocal_index, Rtemp);
1811   __ ldr_s32(Rval, local);
1812   __ add(Rval, Rval, Rconst);
1813   __ str_32(Rval, local);
1814 }
1815 
1816 
1817 void TemplateTable::convert() {
1818   // Checking
1819 #ifdef ASSERT
1820   { TosState tos_in  = ilgl;
1821     TosState tos_out = ilgl;
1822     switch (bytecode()) {
1823       case Bytecodes::_i2l: // fall through
1824       case Bytecodes::_i2f: // fall through
1825       case Bytecodes::_i2d: // fall through
1826       case Bytecodes::_i2b: // fall through
1827       case Bytecodes::_i2c: // fall through
1828       case Bytecodes::_i2s: tos_in = itos; break;
1829       case Bytecodes::_l2i: // fall through
1830       case Bytecodes::_l2f: // fall through
1831       case Bytecodes::_l2d: tos_in = ltos; break;
1832       case Bytecodes::_f2i: // fall through
1833       case Bytecodes::_f2l: // fall through
1834       case Bytecodes::_f2d: tos_in = ftos; break;
1835       case Bytecodes::_d2i: // fall through
1836       case Bytecodes::_d2l: // fall through
1837       case Bytecodes::_d2f: tos_in = dtos; break;
1838       default             : ShouldNotReachHere();
1839     }
1840     switch (bytecode()) {
1841       case Bytecodes::_l2i: // fall through
1842       case Bytecodes::_f2i: // fall through
1843       case Bytecodes::_d2i: // fall through
1844       case Bytecodes::_i2b: // fall through
1845       case Bytecodes::_i2c: // fall through
1846       case Bytecodes::_i2s: tos_out = itos; break;
1847       case Bytecodes::_i2l: // fall through
1848       case Bytecodes::_f2l: // fall through
1849       case Bytecodes::_d2l: tos_out = ltos; break;
1850       case Bytecodes::_i2f: // fall through
1851       case Bytecodes::_l2f: // fall through
1852       case Bytecodes::_d2f: tos_out = ftos; break;
1853       case Bytecodes::_i2d: // fall through
1854       case Bytecodes::_l2d: // fall through
1855       case Bytecodes::_f2d: tos_out = dtos; break;
1856       default             : ShouldNotReachHere();
1857     }
1858     transition(tos_in, tos_out);
1859   }
1860 #endif // ASSERT
1861 
1862   // Conversion
1863   switch (bytecode()) {
1864     case Bytecodes::_i2l:
1865 #ifdef AARCH64
1866       __ sign_extend(R0_tos, R0_tos, 32);
1867 #else
1868       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1869 #endif // AARCH64
1870       break;
1871 
1872     case Bytecodes::_i2f:
1873 #ifdef AARCH64
1874       __ scvtf_sw(S0_tos, R0_tos);
1875 #else
1876 #ifdef __SOFTFP__
1877       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1878 #else
1879       __ fmsr(S0_tmp, R0_tos);
1880       __ fsitos(S0_tos, S0_tmp);
1881 #endif // __SOFTFP__
1882 #endif // AARCH64
1883       break;
1884 
1885     case Bytecodes::_i2d:
1886 #ifdef AARCH64
1887       __ scvtf_dw(D0_tos, R0_tos);
1888 #else
1889 #ifdef __SOFTFP__
1890       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1891 #else
1892       __ fmsr(S0_tmp, R0_tos);
1893       __ fsitod(D0_tos, S0_tmp);
1894 #endif // __SOFTFP__
1895 #endif // AARCH64
1896       break;
1897 
1898     case Bytecodes::_i2b:
1899       __ sign_extend(R0_tos, R0_tos, 8);
1900       break;
1901 
1902     case Bytecodes::_i2c:
1903       __ zero_extend(R0_tos, R0_tos, 16);
1904       break;
1905 
1906     case Bytecodes::_i2s:
1907       __ sign_extend(R0_tos, R0_tos, 16);
1908       break;
1909 
1910     case Bytecodes::_l2i:
1911       /* nothing to do */
1912       break;
1913 
1914     case Bytecodes::_l2f:
1915 #ifdef AARCH64
1916       __ scvtf_sx(S0_tos, R0_tos);
1917 #else
1918       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1919 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1920       __ fmsr(S0_tos, R0);
1921 #endif // !__SOFTFP__ && !__ABI_HARD__
1922 #endif // AARCH64
1923       break;
1924 
1925     case Bytecodes::_l2d:
1926 #ifdef AARCH64
1927       __ scvtf_dx(D0_tos, R0_tos);
1928 #else
1929       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1930 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1931       __ fmdrr(D0_tos, R0, R1);
1932 #endif // !__SOFTFP__ && !__ABI_HARD__
1933 #endif // AARCH64
1934       break;
1935 
1936     case Bytecodes::_f2i:
1937 #ifdef AARCH64
1938       __ fcvtzs_ws(R0_tos, S0_tos);
1939 #else
1940 #ifndef __SOFTFP__
1941       __ ftosizs(S0_tos, S0_tos);
1942       __ fmrs(R0_tos, S0_tos);
1943 #else
1944       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1945 #endif // !__SOFTFP__
1946 #endif // AARCH64
1947       break;
1948 
1949     case Bytecodes::_f2l:
1950 #ifdef AARCH64
1951       __ fcvtzs_xs(R0_tos, S0_tos);
1952 #else
1953 #ifndef __SOFTFP__
1954       __ fmrs(R0_tos, S0_tos);
1955 #endif // !__SOFTFP__
1956       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1957 #endif // AARCH64
1958       break;
1959 
1960     case Bytecodes::_f2d:
1961 #ifdef __SOFTFP__
1962       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1963 #else
1964       __ convert_f2d(D0_tos, S0_tos);
1965 #endif // __SOFTFP__
1966       break;
1967 
1968     case Bytecodes::_d2i:
1969 #ifdef AARCH64
1970       __ fcvtzs_wd(R0_tos, D0_tos);
1971 #else
1972 #ifndef __SOFTFP__
1973       __ ftosizd(Stemp, D0);
1974       __ fmrs(R0, Stemp);
1975 #else
1976       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1977 #endif // !__SOFTFP__
1978 #endif // AARCH64
1979       break;
1980 
1981     case Bytecodes::_d2l:
1982 #ifdef AARCH64
1983       __ fcvtzs_xd(R0_tos, D0_tos);
1984 #else
1985 #ifndef __SOFTFP__
1986       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1987 #endif // !__SOFTFP__
1988       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1989 #endif // AARCH64
1990       break;
1991 
1992     case Bytecodes::_d2f:
1993 #ifdef __SOFTFP__
1994       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1995 #else
1996       __ convert_d2f(S0_tos, D0_tos);
1997 #endif // __SOFTFP__
1998       break;
1999 
2000     default:
2001       ShouldNotReachHere();
2002   }
2003 }
2004 
2005 
2006 void TemplateTable::lcmp() {
2007   transition(ltos, itos);
2008 #ifdef AARCH64
2009   const Register arg1 = R1_tmp;
2010   const Register arg2 = R0_tos;
2011 
2012   __ pop_l(arg1);
2013 
2014   __ cmp(arg1, arg2);
2015   __ cset(R0_tos, gt);               // 1 if '>', else 0
2016   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2017 #else
2018   const Register arg1_lo = R2_tmp;
2019   const Register arg1_hi = R3_tmp;
2020   const Register arg2_lo = R0_tos_lo;
2021   const Register arg2_hi = R1_tos_hi;
2022   const Register res = R4_tmp;
2023 
2024   __ pop_l(arg1_lo, arg1_hi);
2025 
2026   // long compare arg1 with arg2
2027   // result is -1/0/+1 if '<'/'='/'>'
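       // Roughly equivalent C for the conditional sequence below (illustrative sketch only):
       //   if (arg1_hi != arg2_hi) return (arg1_hi < arg2_hi) ? -1 : 1;  // signed compare of high words
       //   if (arg1_lo != arg2_lo) return (arg1_lo < arg2_lo) ? -1 : 1;  // unsigned compare of low words
       //   return 0;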
2028   Label done;
2029 
2030   __ mov (res, 0);
2031   __ cmp (arg1_hi, arg2_hi);
2032   __ mvn (res, 0, lt);
2033   __ mov (res, 1, gt);
2034   __ b(done, ne);
2035   __ cmp (arg1_lo, arg2_lo);
2036   __ mvn (res, 0, lo);
2037   __ mov (res, 1, hi);
2038   __ bind(done);
2039   __ mov (R0_tos, res);
2040 #endif // AARCH64
2041 }
2042 
2043 
2044 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2045   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2046 
2047 #ifdef AARCH64
2048   if (is_float) {
2049     transition(ftos, itos);
2050     __ pop_f(S1_tmp);
2051     __ fcmp_s(S1_tmp, S0_tos);
2052   } else {
2053     transition(dtos, itos);
2054     __ pop_d(D1_tmp);
2055     __ fcmp_d(D1_tmp, D0_tos);
2056   }
2057 
2058   if (unordered_result < 0) {
2059     __ cset(R0_tos, gt);               // 1 if '>', else 0
2060     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2061   } else {
2062     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2063     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2064   }
2065 
2066 #else
2067 
2068 #ifdef __SOFTFP__
2069 
2070   if (is_float) {
2071     transition(ftos, itos);
2072     const Register Rx = R0;
2073     const Register Ry = R1;
2074 
2075     __ mov(Ry, R0_tos);
2076     __ pop_i(Rx);
2077 
2078     if (unordered_result == 1) {
2079       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2080     } else {
2081       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2082     }
2083 
2084   } else {
2085 
2086     transition(dtos, itos);
2087     const Register Rx_lo = R0;
2088     const Register Rx_hi = R1;
2089     const Register Ry_lo = R2;
2090     const Register Ry_hi = R3;
2091 
2092     __ mov(Ry_lo, R0_tos_lo);
2093     __ mov(Ry_hi, R1_tos_hi);
2094     __ pop_l(Rx_lo, Rx_hi);
2095 
2096     if (unordered_result == 1) {
2097       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2098     } else {
2099       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2100     }
2101   }
2102 
2103 #else
2104 
2105   if (is_float) {
2106     transition(ftos, itos);
2107     __ pop_f(S1_tmp);
2108     __ fcmps(S1_tmp, S0_tos);
2109   } else {
2110     transition(dtos, itos);
2111     __ pop_d(D1_tmp);
2112     __ fcmpd(D1_tmp, D0_tos);
2113   }
2114 
2115   __ fmstat();
2116 
2117   // comparison result | flag N | flag Z | flag C | flag V
2118   // "<"               |   1    |   0    |   0    |   0
2119   // "=="              |   0    |   1    |   1    |   0
2120   // ">"               |   0    |   0    |   1    |   0
2121   // unordered         |   0    |   0    |   1    |   1
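       // Illustrative C equivalent of the selection below (unordered_result is +1 for
       // fcmpg/dcmpg and -1 for fcmpl/dcmpl):
       //   if (x > y)  return  1;
       //   if (x < y)  return -1;
       //   if (x == y) return  0;
       //   return unordered_result;  // at least one operand is NaN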
2122 
2123   if (unordered_result < 0) {
2124     __ mov(R0_tos, 1);           // result ==  1 if greater
2125     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2126   } else {
2127     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2128     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2129   }
2130   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2131 #endif // __SOFTFP__
2132 #endif // AARCH64
2133 }
2134 
2135 
2136 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2137 
2138   const Register Rdisp = R0_tmp;
2139   const Register Rbumped_taken_count = R5_tmp;
2140 
2141   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2142 
2143   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2144                              InvocationCounter::counter_offset();
2145   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2146                               InvocationCounter::counter_offset();
2147   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2148 
2149   // Load up R0 with the branch displacement
2150   if (is_wide) {
2151     __ ldrsb(R0_tmp, at_bcp(1));
2152     __ ldrb(R1_tmp, at_bcp(2));
2153     __ ldrb(R2_tmp, at_bcp(3));
2154     __ ldrb(R3_tmp, at_bcp(4));
2155     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2156     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2157     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2158   } else {
2159     __ ldrsb(R0_tmp, at_bcp(1));
2160     __ ldrb(R1_tmp, at_bcp(2));
2161     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2162   }
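       // Illustrative: the displacement is a signed big-endian offset taken from the bytecode
       // stream, roughly disp = (int16_t)((bcp[1] << 8) | bcp[2]) for a non-wide branch and a
       // signed 32-bit value built from bcp[1..4] for goto_w/jsr_w.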
2163 
2164   // Handle all the JSR stuff here, then exit.
2165   // It's much shorter and cleaner than intermingling with the
2166   // non-JSR normal-branch stuff occurring below.
2167   if (is_jsr) {
2168     // compute return address as bci in R1
2169     const Register Rret_addr = R1_tmp;
2170     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2171 
2172     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2173     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2174     __ sub(Rret_addr, Rret_addr, Rtemp);
2175 
2176     // Load the next target bytecode into R3_bytecode and advance Rbcp
2177 #ifdef AARCH64
2178     __ add(Rbcp, Rbcp, Rdisp);
2179     __ ldrb(R3_bytecode, Address(Rbcp));
2180 #else
2181     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2182 #endif // AARCH64
2183 
2184     // Push return address
2185     __ push_i(Rret_addr);
2186     // jsr returns vtos
2187     __ dispatch_only_noverify(vtos);
2188     return;
2189   }
2190 
2191   // Normal (non-jsr) branch handling
2192 
2193   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2194 #ifdef AARCH64
2195   __ add(Rbcp, Rbcp, Rdisp);
2196   __ ldrb(R3_bytecode, Address(Rbcp));
2197 #else
2198   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2199 #endif // AARCH64
2200 
2201   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2202   Label backedge_counter_overflow;
2203   Label profile_method;
2204   Label dispatch;
2205 
2206   if (UseLoopCounter) {
2207     // increment backedge counter for backward branches
2208     // Rdisp (R0): target offset
2209 
2210     const Register Rcnt = R2_tmp;
2211     const Register Rcounters = R1_tmp;
2212 
2213     // count only if backward branch
2214 #ifdef AARCH64
2215     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2216 #else
2217     __ tst(Rdisp, Rdisp);
2218     __ b(dispatch, pl);
2219 #endif // AARCH64
2220 
2221     if (TieredCompilation) {
2222       Label no_mdo;
2223       int increment = InvocationCounter::count_increment;
2224       if (ProfileInterpreter) {
2225         // Are we profiling?
2226         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2227         __ cbz(Rtemp, no_mdo);
2228         // Increment the MDO backedge counter
2229         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2230                                                   in_bytes(InvocationCounter::counter_offset()));
2231         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2232         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2233                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2234         __ b(dispatch);
2235       }
2236       __ bind(no_mdo);
2237       // Increment backedge counter in MethodCounters*
2238       // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on AArch64
2239       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2240                              Rdisp, R3_bytecode,
2241                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2242       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2243       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2244                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2245     } else {
2246       // Increment backedge counter in MethodCounters*
2247       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2248                              Rdisp, R3_bytecode,
2249                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2250       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2251       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2252       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2253 
2254       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2255 #ifdef AARCH64
2256       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2257 #else
2258       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2259 #endif // AARCH64
2260       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2261 
2262       if (ProfileInterpreter) {
2263         // Test to see if we should create a method data oop
2264         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2265         __ ldr_s32(Rtemp, profile_limit);
2266         __ cmp_32(Rcnt, Rtemp);
2267         __ b(dispatch, lt);
2268 
2269         // if no method data exists, go to profile method
2270         __ test_method_data_pointer(R4_tmp, profile_method);
2271 
2272         if (UseOnStackReplacement) {
2273           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2274           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2275           __ ldr_s32(Rtemp, backward_branch_limit);
2276           __ cmp(Rbumped_taken_count, Rtemp);
2277           __ b(dispatch, lo);
2278 
2279           // When ProfileInterpreter is on, the backedge_count comes from the
2280           // MethodData*, whose value does not get reset by the call to
2281           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2282           // routine while the method is being compiled, add a second test to make
2283           // sure the overflow function is called only once every overflow_frequency.
2284           const int overflow_frequency = 1024;
2285 
2286 #ifdef AARCH64
2287           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2288 #else
2289           // was '__ andrs(..., overflow_frequency-1)', testing whether the lowest 10 bits are 0
2290           assert(overflow_frequency == (1 << 10), "shift by 22 not correct for expected frequency");
2291           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2292 #endif // AARCH64
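               // Both variants amount to the illustrative check
               //   if ((bumped_taken_count & (overflow_frequency - 1)) == 0) goto backedge_counter_overflow;
               // so the overflow routine is reached roughly once every overflow_frequency bumps.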
2293 
2294           __ b(backedge_counter_overflow, eq);
2295         }
2296       } else {
2297         if (UseOnStackReplacement) {
2298           // check for overflow against Rcnt, which is the sum of the counters
2299           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2300           __ ldr_s32(Rtemp, backward_branch_limit);
2301           __ cmp_32(Rcnt, Rtemp);
2302           __ b(backedge_counter_overflow, hs);
2303 
2304         }
2305       }
2306     }
2307     __ bind(dispatch);
2308   }
2309 
2310   if (!UseOnStackReplacement) {
2311     __ bind(backedge_counter_overflow);
2312   }
2313 
2314   // continue with the bytecode @ target
2315   __ dispatch_only(vtos);
2316 
2317   if (UseLoopCounter) {
2318     if (ProfileInterpreter) {
2319       // Out-of-line code to allocate method data oop.
2320       __ bind(profile_method);
2321 
2322       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2323       __ set_method_data_pointer_for_bcp();
2324       // reload next bytecode
2325       __ ldrb(R3_bytecode, Address(Rbcp));
2326       __ b(dispatch);
2327     }
2328 
2329     if (UseOnStackReplacement) {
2330       // invocation counter overflow
2331       __ bind(backedge_counter_overflow);
2332 
2333       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2334       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2335 
2336       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2337       const Register Rnmethod = R0;
2338 
2339       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2340 
2341       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2342 
2343       // nmethod may have been invalidated (VM may block upon call_VM return)
2344       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2345       __ cmp(R1_tmp, nmethod::in_use);
2346       __ b(dispatch, ne);
2347 
2348       // We have the address of an on-stack replacement routine in Rnmethod.
2349       // We need to prepare to execute the OSR method. First we must
2350       // migrate the locals and monitors off the stack.
2351 
2352       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2353 
2354       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2355 
2356       // R0 is OSR buffer
2357 
2358       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2359       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2360 
2361 #ifdef AARCH64
2362       __ ldp(FP, LR, Address(FP));
2363       __ mov(SP, Rtemp);
2364 #else
2365       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2366       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2367 #endif // AARCH64
2368 
2369       __ jump(R1_tmp);
2370     }
2371   }
2372 }
2373 
2374 
2375 void TemplateTable::if_0cmp(Condition cc) {
2376   transition(itos, vtos);
2377   // assume branch is more often taken than not (loops use backward branches)
2378   Label not_taken;
2379 #ifdef AARCH64
2380   if (cc == equal) {
2381     __ cbnz_w(R0_tos, not_taken);
2382   } else if (cc == not_equal) {
2383     __ cbz_w(R0_tos, not_taken);
2384   } else {
2385     __ cmp_32(R0_tos, 0);
2386     __ b(not_taken, convNegCond(cc));
2387   }
2388 #else
2389   __ cmp_32(R0_tos, 0);
2390   __ b(not_taken, convNegCond(cc));
2391 #endif // AARCH64
2392   branch(false, false);
2393   __ bind(not_taken);
2394   __ profile_not_taken_branch(R0_tmp);
2395 }
2396 
2397 
2398 void TemplateTable::if_icmp(Condition cc) {
2399   transition(itos, vtos);
2400   // assume branch is more often taken than not (loops use backward branches)
2401   Label not_taken;
2402   __ pop_i(R1_tmp);
2403   __ cmp_32(R1_tmp, R0_tos);
2404   __ b(not_taken, convNegCond(cc));
2405   branch(false, false);
2406   __ bind(not_taken);
2407   __ profile_not_taken_branch(R0_tmp);
2408 }
2409 
2410 
2411 void TemplateTable::if_nullcmp(Condition cc) {
2412   transition(atos, vtos);
2413   assert(cc == equal || cc == not_equal, "invalid condition");
2414 
2415   // assume branch is more often taken than not (loops use backward branches)
2416   Label not_taken;
2417   if (cc == equal) {
2418     __ cbnz(R0_tos, not_taken);
2419   } else {
2420     __ cbz(R0_tos, not_taken);
2421   }
2422   branch(false, false);
2423   __ bind(not_taken);
2424   __ profile_not_taken_branch(R0_tmp);
2425 }
2426 
2427 
2428 void TemplateTable::if_acmp(Condition cc) {
2429   transition(atos, vtos);
2430   // assume branch is more often taken than not (loops use backward branches)
2431   Label not_taken;
2432   __ pop_ptr(R1_tmp);
2433   __ cmp(R1_tmp, R0_tos);
2434   __ b(not_taken, convNegCond(cc));
2435   branch(false, false);
2436   __ bind(not_taken);
2437   __ profile_not_taken_branch(R0_tmp);
2438 }
2439 
2440 
2441 void TemplateTable::ret() {
2442   transition(vtos, vtos);
2443   const Register Rlocal_index = R1_tmp;
2444   const Register Rret_bci = Rtmp_save0; // R4/R19
2445 
2446   locals_index(Rlocal_index);
2447   Address local = load_iaddress(Rlocal_index, Rtemp);
2448   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2449   __ profile_ret(Rtmp_save1, Rret_bci);
2450   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2451   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2452   __ add(Rbcp, Rtemp, Rret_bci);
2453   __ dispatch_next(vtos);
2454 }
2455 
2456 
2457 void TemplateTable::wide_ret() {
2458   transition(vtos, vtos);
2459   const Register Rlocal_index = R1_tmp;
2460   const Register Rret_bci = Rtmp_save0; // R4/R19
2461 
2462   locals_index_wide(Rlocal_index);
2463   Address local = load_iaddress(Rlocal_index, Rtemp);
2464   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2465   __ profile_ret(Rtmp_save1, Rret_bci);
2466   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2467   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2468   __ add(Rbcp, Rtemp, Rret_bci);
2469   __ dispatch_next(vtos);
2470 }
2471 
2472 
2473 void TemplateTable::tableswitch() {
2474   transition(itos, vtos);
2475 
2476   const Register Rindex  = R0_tos;
2477 #ifndef AARCH64
2478   const Register Rtemp2  = R1_tmp;
2479 #endif // !AARCH64
2480   const Register Rabcp   = R2_tmp;  // aligned bcp
2481   const Register Rlow    = R3_tmp;
2482   const Register Rhigh   = R4_tmp;
2483   const Register Roffset = R5_tmp;
2484 
2485   // align bcp
2486   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2487   __ align_reg(Rabcp, Rtemp, BytesPerInt);
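       // Illustrative tableswitch operand layout (JVM spec): after the opcode come 0-3 pad bytes,
       // then int32 default, int32 low, int32 high, and int32 offsets[high - low + 1], all big-endian.
       // The alignment above leaves Rabcp pointing at 'low'; after the load below (with writeback)
       // it points at offsets[0], so the default entry is read from Rabcp - 3*BytesPerInt.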
2488 
2489   // load lo & hi
2490 #ifdef AARCH64
2491   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2492 #else
2493   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2494 #endif // AARCH64
2495   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2496   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2497 
2498   // compare index with high bound
2499   __ cmp_32(Rhigh, Rindex);
2500 
2501 #ifdef AARCH64
2502   Label default_case, do_dispatch;
2503   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2504   __ b(default_case, lt);
2505 
2506   __ sub_w(Rindex, Rindex, Rlow);
2507   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2508   if(ProfileInterpreter) {
2509     __ sxtw(Rindex, Rindex);
2510     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2511   }
2512   __ b(do_dispatch);
2513 
2514   __ bind(default_case);
2515   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2516   if(ProfileInterpreter) {
2517     __ profile_switch_default(R0_tmp);
2518   }
2519 
2520   __ bind(do_dispatch);
2521 #else
2522 
2523   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2524   __ subs(Rindex, Rindex, Rlow, ge);
2525 
2526   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2527   // ("ge" status accumulated from cmp and subs instructions) then load
2528   // offset from table, otherwise load offset for default case
2529 
2530   if(ProfileInterpreter) {
2531     Label default_case, continue_execution;
2532 
2533     __ b(default_case, lt);
2534     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2535     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2536     __ b(continue_execution);
2537 
2538     __ bind(default_case);
2539     __ profile_switch_default(R0_tmp);
2540     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2541 
2542     __ bind(continue_execution);
2543   } else {
2544     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2545     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2546   }
2547 #endif // AARCH64
2548 
2549   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2550 
2551   // load the next bytecode to R3_bytecode and advance Rbcp
2552 #ifdef AARCH64
2553   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2554   __ ldrb(R3_bytecode, Address(Rbcp));
2555 #else
2556   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2557 #endif // AARCH64
2558   __ dispatch_only(vtos);
2559 
2560 }
2561 
2562 
2563 void TemplateTable::lookupswitch() {
2564   transition(itos, itos);
2565   __ stop("lookupswitch bytecode should have been rewritten");
2566 }
2567 
2568 
2569 void TemplateTable::fast_linearswitch() {
2570   transition(itos, vtos);
2571   Label loop, found, default_case, continue_execution;
2572 
2573   const Register Rkey     = R0_tos;
2574   const Register Rabcp    = R2_tmp;  // aligned bcp
2575   const Register Rdefault = R3_tmp;
2576   const Register Rcount   = R4_tmp;
2577   const Register Roffset  = R5_tmp;
2578 
2579   // bswap Rkey, so we can avoid bswapping the table entries
2580   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2581 
2582   // align bcp
2583   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2584   __ align_reg(Rabcp, Rtemp, BytesPerInt);
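       // Illustrative lookupswitch operand layout (JVM spec): after the opcode come 0-3 pad bytes,
       // then int32 default, int32 npairs, followed by npairs big-endian (int32 match, int32 offset)
       // pairs. The load below (with writeback) leaves Rabcp pointing at the first match word.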
2585 
2586   // load default & counter
2587 #ifdef AARCH64
2588   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2589 #else
2590   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2591 #endif // AARCH64
2592   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2593 
2594 #ifdef AARCH64
2595   __ cbz_w(Rcount, default_case);
2596 #else
2597   __ cmp_32(Rcount, 0);
2598   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2599   __ b(default_case, eq);
2600 #endif // AARCH64
2601 
2602   // table search
2603   __ bind(loop);
2604 #ifdef AARCH64
2605   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2606 #endif // AARCH64
2607   __ cmp_32(Rtemp, Rkey);
2608   __ b(found, eq);
2609   __ subs(Rcount, Rcount, 1);
2610 #ifndef AARCH64
2611   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2612 #endif // !AARCH64
2613   __ b(loop, ne);
2614 
2615   // default case
2616   __ bind(default_case);
2617   __ profile_switch_default(R0_tmp);
2618   __ mov(Roffset, Rdefault);
2619   __ b(continue_execution);
2620 
2621   // entry found -> get offset
2622   __ bind(found);
2623   // Rabcp is already incremented and points to the next entry
2624   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2625   if (ProfileInterpreter) {
2626     // Calculate index of the selected case.
2627     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2628 
2629     // align bcp
2630     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2631     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2632 
2633     // load number of cases
2634     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2635     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2636 
2637     // Selected index = <number of cases> - <current loop count>
2638     __ sub(R1_tmp, R2_tmp, Rcount);
2639     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2640   }
2641 
2642   // continue execution
2643   __ bind(continue_execution);
2644   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2645 
2646   // load the next bytecode to R3_bytecode and advance Rbcp
2647 #ifdef AARCH64
2648   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2649   __ ldrb(R3_bytecode, Address(Rbcp));
2650 #else
2651   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2652 #endif // AARCH64
2653   __ dispatch_only(vtos);
2654 }
2655 
2656 
2657 void TemplateTable::fast_binaryswitch() {
2658   transition(itos, vtos);
2659   // Implementation using the following core algorithm:
2660   //
2661   // int binary_search(int key, LookupswitchPair* array, int n) {
2662   //   // Binary search according to "Methodik des Programmierens" by
2663   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2664   //   int i = 0;
2665   //   int j = n;
2666   //   while (i+1 < j) {
2667   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2668   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2669   //     // where a stands for the array and assuming that the (nonexistent)
2670   //     // element a[n] is infinitely big.
2671   //     int h = (i + j) >> 1;
2672   //     // i < h < j
2673   //     if (key < array[h].fast_match()) {
2674   //       j = h;
2675   //     } else {
2676   //       i = h;
2677   //     }
2678   //   }
2679   //   // R: a[i] <= key < a[i+1] or Q
2680   //   // (i.e., if key is within array, i is the correct index)
2681   //   return i;
2682   // }
2683 
2684   // register allocation
2685   const Register key    = R0_tos;                // already set (tosca)
2686   const Register array  = R1_tmp;
2687   const Register i      = R2_tmp;
2688   const Register j      = R3_tmp;
2689   const Register h      = R4_tmp;
2690   const Register val    = R5_tmp;
2691   const Register temp1  = Rtemp;
2692   const Register temp2  = LR_tmp;
2693   const Register offset = R3_tmp;
2694 
2695   // set 'array' = aligned bcp + 2 ints
2696   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2697   __ align_reg(array, temp1, BytesPerInt);
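       // Note: each LookupswitchPair is two BytesPerInt words (match, offset), hence the
       // 'lsl 1+LogBytesPerInt' scaling when indexing 'array' below; the npairs and default
       // words sit at array - BytesPerInt and array - 2*BytesPerInt respectively.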
2698 
2699   // initialize i & j
2700   __ mov(i, 0);                                  // i = 0;
2701   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2702   // Convert j into native byte-ordering
2703   __ byteswap_u32(j, temp1, temp2);
2704 
2705   // and start
2706   Label entry;
2707   __ b(entry);
2708 
2709   // binary search loop
2710   { Label loop;
2711     __ bind(loop);
2712     // int h = (i + j) >> 1;
2713     __ add(h, i, j);                             // h = i + j;
2714     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2715     // if (key < array[h].fast_match()) {
2716     //   j = h;
2717     // } else {
2718     //   i = h;
2719     // }
2720 #ifdef AARCH64
2721     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2722     __ ldr_s32(val, Address(temp1));
2723 #else
2724     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2725 #endif // AARCH64
2726     // Convert array[h].match to native byte-ordering before compare
2727     __ byteswap_u32(val, temp1, temp2);
2728     __ cmp_32(key, val);
2729     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2730     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2731     // while (i+1 < j)
2732     __ bind(entry);
2733     __ add(temp1, i, 1);                             // i+1
2734     __ cmp(temp1, j);                                // i+1 < j
2735     __ b(loop, lt);
2736   }
2737 
2738   // end of binary search, result index is i (must check again!)
2739   Label default_case;
2740   // Convert array[i].match to native byte-ordering before compare
2741 #ifdef AARCH64
2742   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2743   __ ldr_s32(val, Address(temp1));
2744 #else
2745   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2746 #endif // AARCH64
2747   __ byteswap_u32(val, temp1, temp2);
2748   __ cmp_32(key, val);
2749   __ b(default_case, ne);
2750 
2751   // entry found
2752   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2753   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2754   __ profile_switch_case(R0, i, R1, i);
2755   __ byteswap_u32(offset, temp1, temp2);
2756 #ifdef AARCH64
2757   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2758   __ ldrb(R3_bytecode, Address(Rbcp));
2759 #else
2760   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2761 #endif // AARCH64
2762   __ dispatch_only(vtos);
2763 
2764   // default case
2765   __ bind(default_case);
2766   __ profile_switch_default(R0);
2767   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2768   __ byteswap_u32(offset, temp1, temp2);
2769 #ifdef AARCH64
2770   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2771   __ ldrb(R3_bytecode, Address(Rbcp));
2772 #else
2773   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2774 #endif // AARCH64
2775   __ dispatch_only(vtos);
2776 }
2777 
2778 
2779 void TemplateTable::_return(TosState state) {
2780   transition(state, state);
2781   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2782 
2783   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2784     Label skip_register_finalizer;
2785     assert(state == vtos, "only valid state");
2786     __ ldr(R1, aaddress(0));
2787     __ load_klass(Rtemp, R1);
2788     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2789     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2790 
2791     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2792 
2793     __ bind(skip_register_finalizer);
2794   }
2795 
2796   // Narrow result if state is itos but result type is smaller.
2797   // Need to narrow in the return bytecode rather than in generate_return_entry
2798   // since compiled code callers expect the result to already be narrowed.
2799   if (state == itos) {
2800     __ narrow(R0_tos);
2801   }
2802   __ remove_activation(state, LR);
2803 
2804   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2805 
2806 #ifndef AARCH64
2807   // According to interpreter calling conventions, result is returned in R0/R1,
2808   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2809   // This conversion should be done after remove_activation, as it uses
2810   // push(state) & pop(state) to preserve return value.
2811   __ convert_tos_to_retval(state);
2812 #endif // !AARCH64
2813 
2814   __ ret();
2815 
2816   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2817   __ nop();
2818 }
2819 
2820 
2821 // ----------------------------------------------------------------------------
2822 // Volatile variables demand their effects be made known to all CPUs in
2823 // order.  Store buffers on most chips allow reads & writes to reorder; the
2824 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2825 // memory barrier (i.e., it's not sufficient that the interpreter does not
2826 // reorder volatile references, the hardware also must not reorder them).
2827 //
2828 // According to the new Java Memory Model (JMM):
2829 // (1) All volatiles are serialized wrt to each other.
2830 // ALSO reads & writes act as acquire & release, so:
2831 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2832 // the read float up to before the read.  It's OK for non-volatile memory refs
2833 // that happen before the volatile read to float down below it.
2834 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2835 // that happen BEFORE the write float down to after the write.  It's OK for
2836 // non-volatile memory refs that happen after the volatile write to float up
2837 // before it.
2838 //
2839 // We only put in barriers around volatile refs (they are expensive), not
2840 // _between_ memory refs (that would require us to track the flavor of the
2841 // previous memory refs).  Requirements (2) and (3) require some barriers
2842 // before volatile stores and after volatile loads.  These nearly cover
2843 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2844 // case is placed after volatile-stores although it could just as well go
2845 // before volatile-loads.
2846 // TODO-AARCH64: consider removing extra unused parameters
2847 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2848                                      Register tmp,
2849                                      bool preserve_flags,
2850                                      Register load_tgt) {
2851 #ifdef AARCH64
2852   __ membar(order_constraint);
2853 #else
2854   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2855 #endif
2856 }
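     // Illustrative use: after a volatile load the interpreter issues, for example,
     //   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
     // (see getfield_or_static below); the matching store-side barriers are expected to be
     // emitted around volatile stores on the putfield path.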
2857 
2858 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2859 void TemplateTable::resolve_cache_and_index(int byte_no,
2860                                             Register Rcache,
2861                                             Register Rindex,
2862                                             size_t index_size) {
2863   assert_different_registers(Rcache, Rindex, Rtemp);
2864 
2865   Label resolved;
2866   Bytecodes::Code code = bytecode();
2867   switch (code) {
2868   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2869   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
       default: break;
2870   }
2871 
2872   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2873   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2874   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2875   __ b(resolved, eq);
2876 
2877   // resolve first time through
2878   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2879   __ mov(R1, code);
2880   __ call_VM(noreg, entry, R1);
2881   // Update registers with resolved info
2882   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2883   __ bind(resolved);
2884 }
2885 
2886 
2887 // The Rcache and Rindex registers must be set before the call
2888 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2889                                               Register Rindex,
2890                                               Register Roffset,
2891                                               Register Rflags,
2892                                               Register Robj,
2893                                               bool is_static = false) {
2894 
2895   assert_different_registers(Rcache, Rindex, Rtemp);
2896   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2897 
2898   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2899 
2900   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2901 
2902   // Field offset
2903   __ ldr(Roffset, Address(Rtemp,
2904            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2905 
2906   // Flags
2907   __ ldr_u32(Rflags, Address(Rtemp,
2908            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2909 
2910   if (is_static) {
2911     __ ldr(Robj, Address(Rtemp,
2912              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2913     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2914     __ ldr(Robj, Address(Robj, mirror_offset));
2915     __ resolve_oop_handle(Robj);
2916   }
2917 }
2918 
2919 
2920 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2921 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2922                                                Register method,
2923                                                Register itable_index,
2924                                                Register flags,
2925                                                bool is_invokevirtual,
2926                                                bool is_invokevfinal/*unused*/,
2927                                                bool is_invokedynamic) {
2928   // setup registers
2929   const Register cache = R2_tmp;
2930   const Register index = R3_tmp;
2931   const Register temp_reg = Rtemp;
2932   assert_different_registers(cache, index, temp_reg);
2933   assert_different_registers(method, itable_index, temp_reg);
2934 
2935   // determine constant pool cache field offsets
2936   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2937   const int method_offset = in_bytes(
2938     ConstantPoolCache::base_offset() +
2939       ((byte_no == f2_byte)
2940        ? ConstantPoolCacheEntry::f2_offset()
2941        : ConstantPoolCacheEntry::f1_offset()
2942       )
2943     );
2944   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2945                                     ConstantPoolCacheEntry::flags_offset());
2946   // access constant pool cache fields
2947   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2948                                     ConstantPoolCacheEntry::f2_offset());
2949 
2950   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2951   resolve_cache_and_index(byte_no, cache, index, index_size);
2952     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2953     __ ldr(method, Address(temp_reg, method_offset));
2954 
2955   if (itable_index != noreg) {
2956     __ ldr(itable_index, Address(temp_reg, index_offset));
2957   }
2958   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2959 }
2960 
2961 
2962 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
2963 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
2964 // except cache and index registers which are preserved.
2965 void TemplateTable::jvmti_post_field_access(Register Rcache,
2966                                             Register Rindex,
2967                                             bool is_static,
2968                                             bool has_tos) {
2969   assert_different_registers(Rcache, Rindex, Rtemp);
2970 
2971   if (__ can_post_field_access()) {
2972     // Check to see if a field access watch has been set before we take
2973     // the time to call into the VM.
2974 
2975     Label Lcontinue;
2976 
2977     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2978     __ cbz(Rtemp, Lcontinue);
2979 
2980     // cache entry pointer
2981     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2982     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2983     if (is_static) {
2984       __ mov(R1, 0);        // NULL object reference
2985     } else {
2986       __ pop(atos);         // Get the object
2987       __ mov(R1, R0_tos);
2988       __ verify_oop(R1);
2989       __ push(atos);        // Restore stack state
2990     }
2991     // R1: object pointer or NULL
2992     // R2: cache entry pointer
2993     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2994                R1, R2);
2995     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
2996 
2997     __ bind(Lcontinue);
2998   }
2999 }
3000 
3001 
3002 void TemplateTable::pop_and_check_object(Register r) {
3003   __ pop_ptr(r);
3004   __ null_check(r, Rtemp);  // for field access must check obj.
3005   __ verify_oop(r);
3006 }
3007 
3008 
3009 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3010   transition(vtos, vtos);
3011 
3012   const Register Roffset  = R2_tmp;
3013   const Register Robj     = R3_tmp;
3014   const Register Rcache   = R4_tmp;
3015   const Register Rflagsav = Rtmp_save0;  // R4/R19
3016   const Register Rindex   = R5_tmp;
3017   const Register Rflags   = R5_tmp;
3018 
3019   const bool gen_volatile_check = os::is_MP();
3020 
3021   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3022   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3023   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3024 
3025   if (gen_volatile_check) {
3026     __ mov(Rflagsav, Rflags);
3027   }
3028 
3029   if (!is_static) pop_and_check_object(Robj);
3030 
3031   Label Done, Lint, Ltable, shouldNotReachHere;
3032   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3033 
3034   // compute type
3035   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3036   // Make sure we don't need to mask flags after the above shift
3037   ConstantPoolCacheEntry::verify_tos_state_shift();
3038 
3039   // There are actually two versions of implementation of getfield/getstatic:
3040   //
3041   // 32-bit ARM:
3042   // 1) Table switch using add(PC,...) instruction (fast_version)
3043   // 2) Table switch using ldr(PC,...) instruction
3044   //
3045   // AArch64:
3046   // 1) Table switch using adr/add/br instructions (fast_version)
3047   // 2) Table switch using adr/ldr/br instructions
3048   //
3049   // The first version requires a fixed-size code block for each case and
3050   // cannot be used when the RewriteBytecodes or VerifyOops
3051   // modes are enabled.
3052 
3053   // Size of the fixed-size code block for fast_version
3054   const int log_max_block_size = 2;
3055   const int max_block_size = 1 << log_max_block_size;
3056 
3057   // Decide if fast version is enabled
3058   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
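       // Illustrative: in the fast version each type handler below is padded to max_block_size
       // instructions by FixedSizeCodeBlock, so the table switch can compute its target roughly as
       //   handler = Lbtos + tos_state * (max_block_size << Assembler::LogInstructionSize)
       // (modulo the ARM PC read-ahead offset) instead of loading an address from the Ltable entries.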
3059 
3060   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3061   // atos requires additional processing in slow version.
3062   // On AArch64 atos and itos cannot be merged.
3063   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3064 
3065   assert(number_of_states == 10, "number of tos states should be equal to 10");
3066 
3067   __ cmp(Rflags, itos);
3068 #ifdef AARCH64
3069   __ b(Lint, eq);
3070 
3071   if(fast_version) {
3072     __ adr(Rtemp, Lbtos);
3073     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3074     __ br(Rtemp);
3075   } else {
3076     __ adr(Rtemp, Ltable);
3077     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3078     __ br(Rtemp);
3079   }
3080 #else
3081   if(atos_merged_with_itos) {
3082     __ cmp(Rflags, atos, ne);
3083   }
3084 
3085   // table switch by type
3086   if(fast_version) {
3087     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3088   } else {
3089     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3090   }
3091 
3092   // jump to itos/atos case
3093   __ b(Lint);
3094 #endif // AARCH64
3095 
3096   // table with addresses for slow version
3097   if (fast_version) {
3098     // nothing to do
3099   } else  {
3100     AARCH64_ONLY(__ align(wordSize));
3101     __ bind(Ltable);
3102     __ emit_address(Lbtos);
3103     __ emit_address(Lztos);
3104     __ emit_address(Lctos);
3105     __ emit_address(Lstos);
3106     __ emit_address(Litos);
3107     __ emit_address(Lltos);
3108     __ emit_address(Lftos);
3109     __ emit_address(Ldtos);
3110     __ emit_address(Latos);
3111   }
3112 
3113 #ifdef ASSERT
3114   int seq = 0;
3115 #endif
3116   // btos
3117   {
3118     assert(btos == seq++, "btos has unexpected value");
3119     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3120     __ bind(Lbtos);
3121     __ ldrsb(R0_tos, Address(Robj, Roffset));
3122     __ push(btos);
3123     // Rewrite bytecode to be faster
3124     if (!is_static && rc == may_rewrite) {
3125       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3126     }
3127     __ b(Done);
3128   }
3129 
3130   // ztos (same as btos for getfield)
3131   {
3132     assert(ztos == seq++, "ztos has unexpected value");
3133     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3134     __ bind(Lztos);
3135     __ ldrsb(R0_tos, Address(Robj, Roffset));
3136     __ push(ztos);
3137     // Rewrite bytecode to be faster (use btos fast getfield)
3138     if (!is_static && rc == may_rewrite) {
3139       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3140     }
3141     __ b(Done);
3142   }
3143 
3144   // ctos
3145   {
3146     assert(ctos == seq++, "ctos has unexpected value");
3147     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3148     __ bind(Lctos);
3149     __ ldrh(R0_tos, Address(Robj, Roffset));
3150     __ push(ctos);
3151     if (!is_static && rc == may_rewrite) {
3152       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3153     }
3154     __ b(Done);
3155   }
3156 
3157   // stos
3158   {
3159     assert(stos == seq++, "stos has unexpected value");
3160     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3161     __ bind(Lstos);
3162     __ ldrsh(R0_tos, Address(Robj, Roffset));
3163     __ push(stos);
3164     if (!is_static && rc == may_rewrite) {
3165       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3166     }
3167     __ b(Done);
3168   }
3169 
3170   // itos
3171   {
3172     assert(itos == seq++, "itos has unexpected value");
3173     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3174     __ bind(Litos);
3175     __ b(shouldNotReachHere);
3176   }
3177 
3178   // ltos
3179   {
3180     assert(ltos == seq++, "ltos has unexpected value");
3181     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3182     __ bind(Lltos);
3183 #ifdef AARCH64
3184     __ ldr(R0_tos, Address(Robj, Roffset));
3185 #else
3186     __ add(Roffset, Robj, Roffset);
3187     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3188 #endif // AARCH64
3189     __ push(ltos);
3190     if (!is_static && rc == may_rewrite) {
3191       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3192     }
3193     __ b(Done);
3194   }
3195 
3196   // ftos
3197   {
3198     assert(ftos == seq++, "ftos has unexpected value");
3199     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3200     __ bind(Lftos);
3201     // floats and ints are placed on the stack in the same way, so
3202     // we can use push(itos) to transfer the value without using VFP
3203     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3204     __ push(itos);
3205     if (!is_static && rc == may_rewrite) {
3206       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3207     }
3208     __ b(Done);
3209   }
3210 
3211   // dtos
3212   {
3213     assert(dtos == seq++, "dtos has unexpected value");
3214     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3215     __ bind(Ldtos);
3216     // doubles and longs are placed on the stack in the same way, so
3217     // we can use push(ltos) to transfer the value without using VFP
3218 #ifdef AARCH64
3219     __ ldr(R0_tos, Address(Robj, Roffset));
3220 #else
3221     __ add(Rtemp, Robj, Roffset);
3222     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3223 #endif // AARCH64
3224     __ push(ltos);
3225     if (!is_static && rc == may_rewrite) {
3226       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3227     }
3228     __ b(Done);
3229   }
3230 
3231   // atos
3232   {
3233     assert(atos == seq++, "atos has unexpected value");
3234 
3235     // atos case for AArch64 and slow version on 32-bit ARM
3236     if(!atos_merged_with_itos) {
3237       __ bind(Latos);
3238       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3239       __ push(atos);
3240       // Rewrite bytecode to be faster
3241       if (!is_static && rc == may_rewrite) {
3242         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3243       }
3244       __ b(Done);
3245     }
3246   }
3247 
3248   assert(vtos == seq++, "vtos has unexpected value");
3249 
3250   __ bind(shouldNotReachHere);
3251   __ should_not_reach_here();
3252 
3253   // itos and atos cases are frequent so it makes sense to move them out of table switch
3254   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3255 
3256   __ bind(Lint);
3257   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3258   __ push(itos);
3259   // Rewrite bytecode to be faster
3260   if (!is_static && rc == may_rewrite) {
3261     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3262   }
3263 
3264   __ bind(Done);
3265 
3266   if (gen_volatile_check) {
3267     // Check for volatile field
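    // A volatile load must be followed by LoadLoad|LoadStore barriers so that
    // later loads and stores cannot be reordered before it (illustrative note).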
3268     Label notVolatile;
3269     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3270 
3271     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3272 
3273     __ bind(notVolatile);
3274   }
3275 
3276 }
3277 
3278 void TemplateTable::getfield(int byte_no) {
3279   getfield_or_static(byte_no, false);
3280 }
3281 
3282 void TemplateTable::nofast_getfield(int byte_no) {
3283   getfield_or_static(byte_no, false, may_not_rewrite);
3284 }
3285 
3286 void TemplateTable::getstatic(int byte_no) {
3287   getfield_or_static(byte_no, true);
3288 }
3289 
3290 
3291 // The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
3292 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3293 // except cache and index registers which are preserved.
3294 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3295   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3296   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3297 
3298   if (__ can_post_field_modification()) {
3299     // Check to see if a field modification watch has been set before we take
3300     // the time to call into the VM.
3301     Label Lcontinue;
3302 
3303     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3304     __ cbz(Rtemp, Lcontinue);
3305 
3306     if (is_static) {
3307       // Life is simple.  Null out the object pointer.
3308       __ mov(R1, 0);
3309     } else {
3310       // Life is harder. The stack holds the value on top, followed by the object.
3311       // We don't know the size of the value, though; it could be one or two words
3312       // depending on its type. As a result, we must find the type to determine where
3313       // the object is.
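      // Illustrative expression stack picture (top of stack on the right):
      //   one-word value:  ..., obj, value          <- Rstack_top  (obj at slot 1)
      //   two-word value:  ..., obj, value2, value1 <- Rstack_top  (obj at slot 2)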
3314 
3315       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3316       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3317 
3318       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3319       // Make sure we don't need to mask Rtemp after the above shift
3320       ConstantPoolCacheEntry::verify_tos_state_shift();
3321 
3322       __ cmp(Rtemp, ltos);
3323       __ cond_cmp(Rtemp, dtos, ne);
3324 #ifdef AARCH64
3325       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3326       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3327       __ mov(R1, Rtemp, eq);
3328       __ ldr(R1, Address(Rstack_top, R1));
3329 #else
3330       // two word value (ltos/dtos)
3331       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3332 
3333       // one word value (not ltos, dtos)
3334       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3335 #endif // AARCH64
3336     }
3337 
3338     // cache entry pointer
3339     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3340     __ add(R2, R2, in_bytes(cp_base_offset));
3341 
3342     // object (tos)
3343     __ mov(R3, Rstack_top);
3344 
3345     // R1: object pointer set up above (NULL if static)
3346     // R2: cache entry pointer
3347     // R3: value object on the stack
3348     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3349                R1, R2, R3);
3350     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3351 
3352     __ bind(Lcontinue);
3353   }
3354 }
3355 
3356 
3357 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3358   transition(vtos, vtos);
3359 
3360   const Register Roffset  = R2_tmp;
3361   const Register Robj     = R3_tmp;
3362   const Register Rcache   = R4_tmp;
3363   const Register Rflagsav = Rtmp_save0;  // R4/R19
3364   const Register Rindex   = R5_tmp;
3365   const Register Rflags   = R5_tmp;
3366 
3367   const bool gen_volatile_check = os::is_MP();
3368 
3369   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3370   jvmti_post_field_mod(Rcache, Rindex, is_static);
3371   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3372 
3373   if (gen_volatile_check) {
3374     // Check for volatile field
3375     Label notVolatile;
3376     __ mov(Rflagsav, Rflags);
3377     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3378 
3379     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3380 
3381     __ bind(notVolatile);
3382   }
3383 
3384   Label Done, Lint, shouldNotReachHere;
3385   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3386 
3387   // compute type
3388   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3389   // Make sure we don't need to mask flags after the above shift
3390   ConstantPoolCacheEntry::verify_tos_state_shift();
3391 
3392   // There are actually two versions of implementation of putfield/putstatic:
3393   //
3394   // 32-bit ARM:
3395   // 1) Table switch using add(PC,...) instruction (fast_version)
3396   // 2) Table switch using ldr(PC,...) instruction
3397   //
3398   // AArch64:
3399   // 1) Table switch using adr/add/br instructions (fast_version)
3400   // 2) Table switch using adr/ldr/br instructions
3401   //
3402   // The first version requires a fixed-size code block for each case and
3403   // cannot be used when RewriteBytecodes or VerifyOops
3404   // is enabled.
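  // Illustrative sketch of the dispatch (comments only, not emitted code):
  //   fast version:  branch to Lbtos + tos_state * max_block_size * instruction_size
  //                  (every case below is padded to exactly max_block_size instructions)
  //   slow version:  load the handler address from Ltable[tos_state] and branch to it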
3405 
3406   // Size of fixed size code block for fast_version (in instructions)
3407   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3408   const int max_block_size = 1 << log_max_block_size;
3409 
3410   // Decide if fast version is enabled
3411   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3412 
3413   assert(number_of_states == 10, "number of tos states should be equal to 10");
3414 
3415   // itos case is frequent and is moved outside table switch
3416   __ cmp(Rflags, itos);
3417 
3418 #ifdef AARCH64
3419   __ b(Lint, eq);
3420 
3421   if (fast_version) {
3422     __ adr(Rtemp, Lbtos);
3423     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3424     __ br(Rtemp);
3425   } else {
3426     __ adr(Rtemp, Ltable);
3427     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3428     __ br(Rtemp);
3429   }
3430 #else
3431   // table switch by type
3432   if (fast_version) {
3433     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3434   } else  {
3435     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3436   }
3437 
3438   // jump to itos case
3439   __ b(Lint);
3440 #endif // AARCH64
3441 
3442   // table with addresses for slow version
3443   if (fast_version) {
3444     // nothing to do
3445   } else  {
3446     AARCH64_ONLY(__ align(wordSize));
3447     __ bind(Ltable);
3448     __ emit_address(Lbtos);
3449     __ emit_address(Lztos);
3450     __ emit_address(Lctos);
3451     __ emit_address(Lstos);
3452     __ emit_address(Litos);
3453     __ emit_address(Lltos);
3454     __ emit_address(Lftos);
3455     __ emit_address(Ldtos);
3456     __ emit_address(Latos);
3457   }
3458 
3459 #ifdef ASSERT
3460   int seq = 0;
3461 #endif
3462   // btos
3463   {
3464     assert(btos == seq++, "btos has unexpected value");
3465     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3466     __ bind(Lbtos);
3467     __ pop(btos);
3468     if (!is_static) pop_and_check_object(Robj);
3469     __ strb(R0_tos, Address(Robj, Roffset));
3470     if (!is_static && rc == may_rewrite) {
3471       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3472     }
3473     __ b(Done);
3474   }
3475 
3476   // ztos
3477   {
3478     assert(ztos == seq++, "ztos has unexpected value");
3479     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3480     __ bind(Lztos);
3481     __ pop(ztos);
3482     if (!is_static) pop_and_check_object(Robj);
3483     __ and_32(R0_tos, R0_tos, 1);
3484     __ strb(R0_tos, Address(Robj, Roffset));
3485     if (!is_static && rc == may_rewrite) {
3486       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3487     }
3488     __ b(Done);
3489   }
3490 
3491   // ctos
3492   {
3493     assert(ctos == seq++, "ctos has unexpected value");
3494     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3495     __ bind(Lctos);
3496     __ pop(ctos);
3497     if (!is_static) pop_and_check_object(Robj);
3498     __ strh(R0_tos, Address(Robj, Roffset));
3499     if (!is_static && rc == may_rewrite) {
3500       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3501     }
3502     __ b(Done);
3503   }
3504 
3505   // stos
3506   {
3507     assert(stos == seq++, "stos has unexpected value");
3508     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3509     __ bind(Lstos);
3510     __ pop(stos);
3511     if (!is_static) pop_and_check_object(Robj);
3512     __ strh(R0_tos, Address(Robj, Roffset));
3513     if (!is_static && rc == may_rewrite) {
3514       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3515     }
3516     __ b(Done);
3517   }
3518 
3519   // itos
3520   {
3521     assert(itos == seq++, "itos has unexpected value");
3522     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3523     __ bind(Litos);
3524     __ b(shouldNotReachHere);
3525   }
3526 
3527   // ltos
3528   {
3529     assert(ltos == seq++, "ltos has unexpected value");
3530     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3531     __ bind(Lltos);
3532     __ pop(ltos);
3533     if (!is_static) pop_and_check_object(Robj);
3534 #ifdef AARCH64
3535     __ str(R0_tos, Address(Robj, Roffset));
3536 #else
3537     __ add(Roffset, Robj, Roffset);
3538     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3539 #endif // AARCH64
3540     if (!is_static && rc == may_rewrite) {
3541       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3542     }
3543     __ b(Done);
3544   }
3545 
3546   // ftos
3547   {
3548     assert(ftos == seq++, "ftos has unexpected value");
3549     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3550     __ bind(Lftos);
3551     // floats and ints are placed on stack in the same way, so
3552     // we can use pop(itos) to transfer value without using VFP
3553     __ pop(itos);
3554     if (!is_static) pop_and_check_object(Robj);
3555     __ str_32(R0_tos, Address(Robj, Roffset));
3556     if (!is_static && rc == may_rewrite) {
3557       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3558     }
3559     __ b(Done);
3560   }
3561 
3562   // dtos
3563   {
3564     assert(dtos == seq++, "dtos has unexpected value");
3565     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3566     __ bind(Ldtos);
3567     // doubles and longs are placed on stack in the same way, so
3568     // we can use pop(ltos) to transfer value without using VFP
3569     __ pop(ltos);
3570     if (!is_static) pop_and_check_object(Robj);
3571 #ifdef AARCH64
3572     __ str(R0_tos, Address(Robj, Roffset));
3573 #else
3574     __ add(Rtemp, Robj, Roffset);
3575     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3576 #endif // AARCH64
3577     if (!is_static && rc == may_rewrite) {
3578       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3579     }
3580     __ b(Done);
3581   }
3582 
3583   // atos
3584   {
3585     assert(atos == seq++, "atos has unexpected value");
3586     __ bind(Latos);
3587     __ pop(atos);
3588     if (!is_static) pop_and_check_object(Robj);
3589     // Store into the field
3590     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3591     if (!is_static && rc == may_rewrite) {
3592       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3593     }
3594     __ b(Done);
3595   }
3596 
3597   __ bind(shouldNotReachHere);
3598   __ should_not_reach_here();
3599 
3600   // itos case is frequent and is moved outside table switch
3601   __ bind(Lint);
3602   __ pop(itos);
3603   if (!is_static) pop_and_check_object(Robj);
3604   __ str_32(R0_tos, Address(Robj, Roffset));
3605   if (!is_static && rc == may_rewrite) {
3606     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3607   }
3608 
3609   __ bind(Done);
3610 
3611   if (gen_volatile_check) {
3612     Label notVolatile;
3613     if (is_static) {
3614       // Just check for volatile. Memory barrier for static final field
3615       // is handled by class initialization.
3616       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3617       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3618       __ bind(notVolatile);
3619     } else {
3620       // Check for volatile field and final field
3621       Label skipMembar;
3622 
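      // A volatile store must be followed by a StoreLoad barrier; a store to a
      // final field must be followed by a StoreStore barrier before the object
      // can be published. If the field is neither, no barrier is needed here.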
3623       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3624                        1 << ConstantPoolCacheEntry::is_final_shift);
3625       __ b(skipMembar, eq);
3626 
3627       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3628 
3629       // StoreLoad barrier after volatile field write
3630       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3631       __ b(skipMembar);
3632 
3633       // StoreStore barrier after final field write
3634       __ bind(notVolatile);
3635       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3636 
3637       __ bind(skipMembar);
3638     }
3639   }
3640 
3641 }
3642 
3643 void TemplateTable::putfield(int byte_no) {
3644   putfield_or_static(byte_no, false);
3645 }
3646 
3647 void TemplateTable::nofast_putfield(int byte_no) {
3648   putfield_or_static(byte_no, false, may_not_rewrite);
3649 }
3650 
3651 void TemplateTable::putstatic(int byte_no) {
3652   putfield_or_static(byte_no, true);
3653 }
3654 
3655 
3656 void TemplateTable::jvmti_post_fast_field_mod() {
3657   // This version of jvmti_post_fast_field_mod() is not used on ARM
3658   Unimplemented();
3659 }
3660 
3661 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3662 // but preserves tosca with the given state.
3663 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3664   if (__ can_post_field_modification()) {
3665     // Check to see if a field modification watch has been set before we take
3666     // the time to call into the VM.
3667     Label done;
3668 
3669     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3670     __ cbz(R2, done);
3671 
3672     __ pop_ptr(R3);               // copy the object pointer from tos
3673     __ verify_oop(R3);
3674     __ push_ptr(R3);              // put the object pointer back on tos
3675 
3676     __ push(state);               // save value on the stack
3677 
3678     // access constant pool cache entry
3679     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3680 
3681     __ mov(R1, R3);
3682     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3683     __ mov(R3, Rstack_top); // put tos addr into R3
3684 
3685     // R1: object pointer copied above
3686     // R2: cache entry pointer
3687     // R3: jvalue object on the stack
3688     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3689 
3690     __ pop(state);                // restore value
3691 
3692     __ bind(done);
3693   }
3694 }
3695 
3696 
3697 void TemplateTable::fast_storefield(TosState state) {
3698   transition(state, vtos);
3699 
3700   ByteSize base = ConstantPoolCache::base_offset();
3701 
3702   jvmti_post_fast_field_mod(state);
3703 
3704   const Register Rcache  = R2_tmp;
3705   const Register Rindex  = R3_tmp;
3706   const Register Roffset = R3_tmp;
3707   const Register Rflags  = Rtmp_save0; // R4/R19
3708   const Register Robj    = R5_tmp;
3709 
3710   const bool gen_volatile_check = os::is_MP();
3711 
3712   // access constant pool cache
3713   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3714 
3715   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3716 
3717   if (gen_volatile_check) {
3718     // load flags to test volatile
3719     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3720   }
3721 
3722   // replace index with field offset from cache entry
3723   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3724 
3725   if (gen_volatile_check) {
3726     // Check for volatile store
3727     Label notVolatile;
3728     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3729 
3730     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3731     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3732 
3733     __ bind(notVolatile);
3734   }
3735 
3736   // Get object from stack
3737   pop_and_check_object(Robj);
3738 
3739   // access field
3740   switch (bytecode()) {
3741     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3742                                      // fall through
3743     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3744     case Bytecodes::_fast_sputfield: // fall through
3745     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3746     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3747 #ifdef AARCH64
3748     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3749     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3750     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3751 #else
3752     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3753                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3754 
3755 #ifdef __SOFTFP__
3756     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3757     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3758                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3759 #else
3760     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3761                                      __ fsts(S0_tos, Address(Robj));          break;
3762     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3763                                      __ fstd(D0_tos, Address(Robj));          break;
3764 #endif // __SOFTFP__
3765 #endif // AARCH64
3766 
3767     case Bytecodes::_fast_aputfield:
3768       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3769       break;
3770 
3771     default:
3772       ShouldNotReachHere();
3773   }
3774 
3775   if (gen_volatile_check) {
3776     Label notVolatile;
3777     Label skipMembar;
3778     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3779                    1 << ConstantPoolCacheEntry::is_final_shift);
3780     __ b(skipMembar, eq);
3781 
3782     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3783 
3784     // StoreLoad barrier after volatile field write
3785     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3786     __ b(skipMembar);
3787 
3788     // StoreStore barrier after final field write
3789     __ bind(notVolatile);
3790     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3791 
3792     __ bind(skipMembar);
3793   }
3794 }
3795 
3796 
3797 void TemplateTable::fast_accessfield(TosState state) {
3798   transition(atos, state);
3799 
3800   // do the JVMTI work here to avoid disturbing the register state below
3801   if (__ can_post_field_access()) {
3802     // Check to see if a field access watch has been set before we take
3803     // the time to call into the VM.
3804     Label done;
3805     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3806     __ cbz(R2, done);
3807     // access constant pool cache entry
3808     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3809     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3810     __ verify_oop(R0_tos);
3811     __ mov(R1, R0_tos);
3812     // R1: object pointer copied above
3813     // R2: cache entry pointer
3814     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3815     __ pop_ptr(R0_tos);   // restore object pointer
3816 
3817     __ bind(done);
3818   }
3819 
3820   const Register Robj    = R0_tos;
3821   const Register Rcache  = R2_tmp;
3822   const Register Rflags  = R2_tmp;
3823   const Register Rindex  = R3_tmp;
3824   const Register Roffset = R3_tmp;
3825 
3826   const bool gen_volatile_check = os::is_MP();
3827 
3828   // access constant pool cache
3829   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3830   // replace index with field offset from cache entry
3831   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3832   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3833 
3834   if (gen_volatile_check) {
3835     // load flags to test volatile
3836     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3837   }
3838 
3839   __ verify_oop(Robj);
3840   __ null_check(Robj, Rtemp);
3841 
3842   // access field
3843   switch (bytecode()) {
3844     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3845     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3846     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3847     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3848 #ifdef AARCH64
3849     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3850     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3851     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3852 #else
3853     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3854                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3855 #ifdef __SOFTFP__
3856     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3857     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3858                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3859 #else
3860     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3861     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3862 #endif // __SOFTFP__
3863 #endif // AARCH64
3864     case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3865     default:
3866       ShouldNotReachHere();
3867   }
3868 
3869   if (gen_volatile_check) {
3870     // Check for volatile load
3871     Label notVolatile;
3872     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3873 
3874     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3875     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3876 
3877     __ bind(notVolatile);
3878   }
3879 }
3880 
3881 
3882 void TemplateTable::fast_xaccess(TosState state) {
3883   transition(vtos, state);
3884 
3885   const Register Robj = R1_tmp;
3886   const Register Rcache = R2_tmp;
3887   const Register Rindex = R3_tmp;
3888   const Register Roffset = R3_tmp;
3889   const Register Rflags = R4_tmp;
3890   Label done;
3891 
3892   // get receiver
3893   __ ldr(Robj, aaddress(0));
3894 
3895   // access constant pool cache
3896   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3897   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3898   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3899 
3900   const bool gen_volatile_check = os::is_MP();
3901 
3902   if (gen_volatile_check) {
3903     // load flags to test volatile
3904     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3905   }
3906 
3907   // make sure exception is reported in correct bcp range (getfield is next instruction)
3908   __ add(Rbcp, Rbcp, 1);
3909   __ null_check(Robj, Rtemp);
3910   __ sub(Rbcp, Rbcp, 1);
3911 
3912 #ifdef AARCH64
3913   if (gen_volatile_check) {
3914     Label notVolatile;
3915     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3916 
3917     __ add(Rtemp, Robj, Roffset);
3918 
3919     if (state == itos) {
3920       __ ldar_w(R0_tos, Rtemp);
3921     } else if (state == atos) {
3922       if (UseCompressedOops) {
3923         __ ldar_w(R0_tos, Rtemp);
3924         __ decode_heap_oop(R0_tos);
3925       } else {
3926         __ ldar(R0_tos, Rtemp);
3927       }
3928       __ verify_oop(R0_tos);
3929     } else if (state == ftos) {
3930       __ ldar_w(R0_tos, Rtemp);
3931       __ fmov_sw(S0_tos, R0_tos);
3932     } else {
3933       ShouldNotReachHere();
3934     }
3935     __ b(done);
3936 
3937     __ bind(notVolatile);
3938   }
3939 #endif // AARCH64
3940 
3941   if (state == itos) {
3942     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3943   } else if (state == atos) {
3944     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3945     __ verify_oop(R0_tos);
3946   } else if (state == ftos) {
3947 #ifdef AARCH64
3948     __ ldr_s(S0_tos, Address(Robj, Roffset));
3949 #else
3950 #ifdef __SOFTFP__
3951     __ ldr(R0_tos, Address(Robj, Roffset));
3952 #else
3953     __ add(Roffset, Robj, Roffset);
3954     __ flds(S0_tos, Address(Roffset));
3955 #endif // __SOFTFP__
3956 #endif // AARCH64
3957   } else {
3958     ShouldNotReachHere();
3959   }
3960 
3961 #ifndef AARCH64
3962   if (gen_volatile_check) {
3963     // Check for volatile load
3964     Label notVolatile;
3965     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3966 
3967     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3968 
3969     __ bind(notVolatile);
3970   }
3971 #endif // !AARCH64
3972 
3973   __ bind(done);
3974 }
3975 
3976 
3977 
3978 //----------------------------------------------------------------------------------------------------
3979 // Calls
3980 
3981 void TemplateTable::count_calls(Register method, Register temp) {
3982   // implemented elsewhere
3983   ShouldNotReachHere();
3984 }
3985 
3986 
3987 void TemplateTable::prepare_invoke(int byte_no,
3988                                    Register method,  // linked method (or i-klass)
3989                                    Register index,   // itable index, MethodType, etc.
3990                                    Register recv,    // if caller wants to see it
3991                                    Register flags    // if caller wants to test it
3992                                    ) {
3993   // determine flags
3994   const Bytecodes::Code code = bytecode();
3995   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3996   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3997   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3998   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3999   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4000   const bool load_receiver       = (recv != noreg);
4001   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4002   assert(recv  == noreg || recv  == R2, "");
4003   assert(flags == noreg || flags == R3, "");
4004 
4005   // setup registers & access constant pool cache
4006   if (recv  == noreg)  recv  = R2;
4007   if (flags == noreg)  flags = R3;
4008   const Register temp = Rtemp;
4009   const Register ret_type = R1_tmp;
4010   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4011 
4012   // save 'interpreter return address'
4013   __ save_bcp();
4014 
4015   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4016 
4017   // maybe push extra argument
4018   if (is_invokedynamic || is_invokehandle) {
4019     Label L_no_push;
4020     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4021     __ mov(temp, index);
4022     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4023     __ load_resolved_reference_at_index(index, temp);
4024     __ verify_oop(index);
4025     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4026     __ bind(L_no_push);
4027   }
4028 
4029   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4030   if (load_receiver) {
4031     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4032     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4033     __ ldr(recv, recv_addr);
4034     __ verify_oop(recv);
4035   }
4036 
4037   // compute return type
4038   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4039   // Make sure we don't need to mask flags after the above shift
4040   ConstantPoolCacheEntry::verify_tos_state_shift();
4041   // load return address
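  // The return address is taken from the invoke return entry table for this
  // bytecode, indexed by the callee's result tos state, so execution resumes
  // at the entry that handles that return type.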
4042   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4043     __ mov_slow(temp, table);
4044     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4045   }
4046 }
4047 
4048 
4049 void TemplateTable::invokevirtual_helper(Register index,
4050                                          Register recv,
4051                                          Register flags) {
4052 
4053   const Register recv_klass = R2_tmp;
4054 
4055   assert_different_registers(index, recv, flags, Rtemp);
4056   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4057 
4058   // Test for an invoke of a final method
4059   Label notFinal;
4060   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4061 
4062   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4063 
4064   // do the call - the index is actually the method to call
4065 
4066   // It's final, need a null check here!
4067   __ null_check(recv, Rtemp);
4068 
4069   // profile this call
4070   __ profile_final_call(R0_tmp);
4071 
4072   __ jump_from_interpreted(Rmethod);
4073 
4074   __ bind(notFinal);
4075 
4076   // get receiver klass
4077   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4078   __ load_klass(recv_klass, recv);
4079 
4080   // profile this call
4081   __ profile_virtual_call(R0_tmp, recv_klass);
4082 
4083   // get target Method* & entry point
4084   const int base = in_bytes(Klass::vtable_start_offset());
4085   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
4086   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4087   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4088   __ jump_from_interpreted(Rmethod);
4089 }
4090 
4091 void TemplateTable::invokevirtual(int byte_no) {
4092   transition(vtos, vtos);
4093   assert(byte_no == f2_byte, "use this argument");
4094 
4095   const Register Rrecv  = R2_tmp;
4096   const Register Rflags = R3_tmp;
4097 
4098   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4099 
4100   // Rmethod: index
4101   // Rrecv:   receiver
4102   // Rflags:  flags
4103   // LR:      return address
4104 
4105   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4106 }
4107 
4108 
4109 void TemplateTable::invokespecial(int byte_no) {
4110   transition(vtos, vtos);
4111   assert(byte_no == f1_byte, "use this argument");
4112   const Register Rrecv  = R2_tmp;
4113   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4114   __ verify_oop(Rrecv);
4115   __ null_check(Rrecv, Rtemp);
4116   // do the call
4117   __ profile_call(Rrecv);
4118   __ jump_from_interpreted(Rmethod);
4119 }
4120 
4121 
4122 void TemplateTable::invokestatic(int byte_no) {
4123   transition(vtos, vtos);
4124   assert(byte_no == f1_byte, "use this argument");
4125   prepare_invoke(byte_no, Rmethod);
4126   // do the call
4127   __ profile_call(R2_tmp);
4128   __ jump_from_interpreted(Rmethod);
4129 }
4130 
4131 
4132 void TemplateTable::fast_invokevfinal(int byte_no) {
4133   transition(vtos, vtos);
4134   assert(byte_no == f2_byte, "use this argument");
4135   __ stop("fast_invokevfinal is not used on ARM");
4136 }
4137 
4138 
4139 void TemplateTable::invokeinterface(int byte_no) {
4140   transition(vtos, vtos);
4141   assert(byte_no == f1_byte, "use this argument");
4142 
4143   const Register Ritable = R1_tmp;
4144   const Register Rrecv   = R2_tmp;
4145   const Register Rinterf = R5_tmp;
4146   const Register Rindex  = R4_tmp;
4147   const Register Rflags  = R3_tmp;
4148   const Register Rklass  = R3_tmp;
4149 
4150   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4151 
4152   // Special case of invokeinterface called for a virtual method of
4153   // java.lang.Object.  See cpCache.cpp for details.
4154   // This code isn't produced by javac, but could be produced by
4155   // another compliant Java compiler.
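  // For example (illustrative): a compiler other than javac could emit
  // invokeinterface for "l.hashCode()" on an interface-typed receiver l, even
  // though hashCode() is a virtual method of java.lang.Object; such calls take
  // the forced-virtual path below.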
4156   Label notMethod;
4157   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4158 
4159   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4160   __ bind(notMethod);
4161 
4162   // Get receiver klass into Rklass - also a null check
4163   __ load_klass(Rklass, Rrecv);
4164 
4165   Label no_such_interface;
4166 
4167   // Receiver subtype check against REFC.
4168   __ lookup_interface_method(// inputs: rec. class, interface
4169                              Rklass, Rinterf, noreg,
4170                              // outputs:  scan temp. reg1, scan temp. reg2
4171                              noreg, Ritable, Rtemp,
4172                              no_such_interface);
4173 
4174   // profile this call
4175   __ profile_virtual_call(R0_tmp, Rklass);
4176 
4177   // Get declaring interface class from method
4178   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4179   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4180   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4181 
4182   // Get itable index from method
4183   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4184   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // the small negative constant (itable_index_max) cannot be encoded as an arm32 immediate
4185   __ neg(Rindex, Rtemp);
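  // Net effect: Rindex = Method::itable_index_max - <value loaded above>.
  // A single-instruction form would need itable_index_max itself as an immediate
  // operand, so the value is adjusted by the encodable negated constant and then
  // negated (illustrative note).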
4186 
4187   __ lookup_interface_method(// inputs: rec. class, interface
4188                              Rklass, Rinterf, Rindex,
4189                              // outputs:  scan temp. reg1, scan temp. reg2
4190                              Rmethod, Ritable, Rtemp,
4191                              no_such_interface);
4192 
4193   // Rmethod: Method* to call
4194 
4195   // Check for abstract method error
4196   // Note: This should be done more efficiently via a throw_abstract_method_error
4197   //       interpreter entry point and a conditional jump to it in case of a null
4198   //       method.
4199   { Label L;
4200     __ cbnz(Rmethod, L);
4201     // throw exception
4202     // note: must restore interpreter registers to canonical
4203     //       state for exception handling to work correctly!
4204     __ restore_method();
4205     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4206     // the call_VM checks for exception, so we should never return here.
4207     __ should_not_reach_here();
4208     __ bind(L);
4209   }
4210 
4211   // do the call
4212   __ jump_from_interpreted(Rmethod);
4213 
4214   // throw exception
4215   __ bind(no_such_interface);
4216   __ restore_method();
4217   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4218   // the call_VM checks for exception, so we should never return here.
4219   __ should_not_reach_here();
4220 }
4221 
4222 void TemplateTable::invokehandle(int byte_no) {
4223   transition(vtos, vtos);
4224 
4225   // TODO-AARCH64 review register usage
4226   const Register Rrecv  = R2_tmp;
4227   const Register Rmtype = R4_tmp;
4228   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4229 
4230   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4231   __ null_check(Rrecv, Rtemp);
4232 
4233   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4234   // Rmethod: MH.invokeExact_MT method (from f2)
4235 
4236   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4237 
4238   // do the call
4239   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4240   __ mov(Rmethod, R5_method);
4241   __ jump_from_interpreted(Rmethod);
4242 }
4243 
4244 void TemplateTable::invokedynamic(int byte_no) {
4245   transition(vtos, vtos);
4246 
4247   // TODO-AARCH64 review register usage
4248   const Register Rcallsite = R4_tmp;
4249   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4250 
4251   prepare_invoke(byte_no, R5_method, Rcallsite);
4252 
4253   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4254   // Rmethod:   MH.linkToCallSite method (from f2)
4255 
4256   // Note:  Rcallsite is already pushed by prepare_invoke
4257 
4258   if (ProfileInterpreter) {
4259     __ profile_call(R2_tmp);
4260   }
4261 
4262   // do the call
4263   __ mov(Rmethod, R5_method);
4264   __ jump_from_interpreted(Rmethod);
4265 }
4266 
4267 //----------------------------------------------------------------------------------------------------
4268 // Allocation
4269 
4270 void TemplateTable::_new() {
4271   transition(vtos, atos);
4272 
4273   const Register Robj   = R0_tos;
4274   const Register Rcpool = R1_tmp;
4275   const Register Rindex = R2_tmp;
4276   const Register Rtags  = R3_tmp;
4277   const Register Rsize  = R3_tmp;
4278 
4279   Register Rklass = R4_tmp;
4280   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4281   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4282 
4283   Label slow_case;
4284   Label done;
4285   Label initialize_header;
4286   Label initialize_object;  // including clearing the fields
4287 
4288   const bool allow_shared_alloc =
4289     Universe::heap()->supports_inline_contig_alloc();
4290 
4291   // Literals
4292   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4293 
4294   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4295   __ get_cpool_and_tags(Rcpool, Rtags);
4296 
4297   // Make sure the class we're about to instantiate has been resolved.
4298   // This is done before loading InstanceKlass to be consistent with the order
4299   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4300   const int tags_offset = Array<u1>::base_offset_in_bytes();
4301   __ add(Rtemp, Rtags, Rindex);
4302 
4303 #ifdef AARCH64
4304   __ add(Rtemp, Rtemp, tags_offset);
4305   __ ldarb(Rtemp, Rtemp);
4306 #else
4307   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4308 
4309   // use Rklass as a scratch
4310   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4311 #endif // AARCH64
4312 
4313   // get InstanceKlass
4314   __ cmp(Rtemp, JVM_CONSTANT_Class);
4315   __ b(slow_case, ne);
4316   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4317 
4318   // make sure klass is initialized & doesn't have finalizer
4319   // make sure klass is fully initialized
4320   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4321   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4322   __ b(slow_case, ne);
4323 
4324   // get instance_size in InstanceKlass (scaled to a count of bytes)
4325   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4326 
4327   // test to see if it has a finalizer or is malformed in some way
4328   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4329   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4330 
4331   // Allocate the instance:
4332   //  If TLAB is enabled:
4333   //    Try to allocate in the TLAB.
4334   //    If fails, go to the slow path.
4335   //  Else If inline contiguous allocations are enabled:
4336   //    Try to allocate in eden.
4337   //    If fails due to heap end, go to slow path.
4338   //
4339   //  If TLAB is enabled OR inline contiguous is enabled:
4340   //    Initialize the allocation.
4341   //    Exit.
4342   //
4343   //  Go to slow path.
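  // The TLAB fast path below is bump-the-pointer allocation (a sketch):
  //   Robj = tlab_top; new_top = Robj + Rsize;
  //   if (new_top > tlab_end) goto slow_case; else tlab_top = new_top;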
4344   if (UseTLAB) {
4345     const Register Rtlab_top = R1_tmp;
4346     const Register Rtlab_end = R2_tmp;
4347     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4348 
4349     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4350     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4351     __ add(Rtlab_top, Robj, Rsize);
4352     __ cmp(Rtlab_top, Rtlab_end);
4353     __ b(slow_case, hi);
4354     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4355     if (ZeroTLAB) {
4356       // the fields have been already cleared
4357       __ b(initialize_header);
4358     } else {
4359       // initialize both the header and fields
4360       __ b(initialize_object);
4361     }
4362   } else {
4363     // Allocation in the shared Eden, if allowed.
4364     if (allow_shared_alloc) {
4365       const Register Rheap_top_addr = R2_tmp;
4366       const Register Rheap_top = R5_tmp;
4367       const Register Rheap_end = Rtemp;
4368       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4369 
4370       // Rheap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4371       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4372 
4373       Label retry;
4374       __ bind(retry);
4375 
4376 #ifdef AARCH64
4377       __ ldxr(Robj, Rheap_top_addr);
4378 #else
4379       __ ldr(Robj, Address(Rheap_top_addr));
4380 #endif // AARCH64
4381 
4382       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4383       __ add(Rheap_top, Robj, Rsize);
4384       __ cmp(Rheap_top, Rheap_end);
4385       __ b(slow_case, hi);
4386 
4387       // Update heap top atomically.
4388       // If someone beats us on the allocation, try again, otherwise continue.
4389 #ifdef AARCH64
4390       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4391       __ cbnz_w(Rtemp2, retry);
4392 #else
4393       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4394       __ b(retry, ne);
4395 #endif // AARCH64
4396 
4397       __ incr_allocated_bytes(Rsize, Rtemp);
4398     }
4399   }
4400 
4401   if (UseTLAB || allow_shared_alloc) {
4402     const Register Rzero0 = R1_tmp;
4403     const Register Rzero1 = R2_tmp;
4404     const Register Rzero_end = R5_tmp;
4405     const Register Rzero_cur = Rtemp;
4406     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4407 
4408     // The object is initialized before the header.  If the object size is
4409     // zero, go directly to the header initialization.
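    // Rsize is reduced by the header size below, and the field area starting at
    // Robj + sizeof(oopDesc) is then zeroed two words per iteration.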
4410     __ bind(initialize_object);
4411     __ subs(Rsize, Rsize, sizeof(oopDesc));
4412     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4413     __ b(initialize_header, eq);
4414 
4415 #ifdef ASSERT
4416     // make sure Rsize is a multiple of 8
4417     Label L;
4418     __ tst(Rsize, 0x07);
4419     __ b(L, eq);
4420     __ stop("object size is not multiple of 8 - adjust this code");
4421     __ bind(L);
4422 #endif
4423 
4424 #ifdef AARCH64
4425     {
4426       Label loop;
4427       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4428       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4429       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4430       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4431 
4432       // Zero by 2 words per iteration.
4433       __ bind(loop);
4434       __ subs(Rsize, Rsize, 2*wordSize);
4435       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4436       __ b(loop, gt);
4437     }
4438 #else
4439     __ mov(Rzero0, 0);
4440     __ mov(Rzero1, 0);
4441     __ add(Rzero_end, Rzero_cur, Rsize);
4442 
4443     // initialize remaining object fields: Rsize was a multiple of 8
4444     { Label loop;
4445       // loop is unrolled 2 times
4446       __ bind(loop);
4447       // #1
4448       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4449       __ cmp(Rzero_cur, Rzero_end);
4450       // #2
4451       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4452       __ cmp(Rzero_cur, Rzero_end, ne);
4453       __ b(loop, ne);
4454     }
4455 #endif // AARCH64
4456 
4457     // initialize object header only.
4458     __ bind(initialize_header);
4459     if (UseBiasedLocking) {
4460       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4461     } else {
4462       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4463     }
4464     // mark
4465     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4466 
4467     // klass
4468 #ifdef AARCH64
4469     __ store_klass_gap(Robj);
4470 #endif // AARCH64
4471     __ store_klass(Rklass, Robj); // blows Rklass:
4472     Rklass = noreg;
4473 
4474     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4475     if (DTraceAllocProbes) {
4476       // Trigger dtrace event for fastpath
4477       Label Lcontinue;
4478 
4479       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4480       __ cbz(Rtemp, Lcontinue);
4481 
4482       __ push(atos);
4483       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4484       __ pop(atos);
4485 
4486       __ bind(Lcontinue);
4487     }
4488 
4489     __ b(done);
4490   } else {
4491     // jump over literals
4492     __ b(slow_case);
4493   }
4494 
4495   if (allow_shared_alloc) {
4496     __ bind_literal(Lheap_top_addr);
4497   }
4498 
4499   // slow case
4500   __ bind(slow_case);
4501   __ get_constant_pool(Rcpool);
4502   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4503   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4504 
4505   // continue
4506   __ bind(done);
4507 
4508   // StoreStore barrier required after complete initialization
4509   // (headers + content zeroing), before the object may escape.
4510   __ membar(MacroAssembler::StoreStore, R1_tmp);
4511 }
4512 
4513 
4514 void TemplateTable::newarray() {
4515   transition(itos, atos);
4516   __ ldrb(R1, at_bcp(1));
4517   __ mov(R2, R0_tos);
4518   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4519   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4520 }
4521 
4522 
4523 void TemplateTable::anewarray() {
4524   transition(itos, atos);
4525   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4526   __ get_constant_pool(R1);
4527   __ mov(R3, R0_tos);
4528   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4529   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4530 }
4531 
4532 
4533 void TemplateTable::arraylength() {
4534   transition(atos, itos);
4535   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4536   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4537 }
4538 
4539 
4540 void TemplateTable::checkcast() {
4541   transition(atos, atos);
4542   Label done, is_null, quicked, resolved, throw_exception;
4543 
4544   const Register Robj = R0_tos;
4545   const Register Rcpool = R2_tmp;
4546   const Register Rtags = R3_tmp;
4547   const Register Rindex = R4_tmp;
4548   const Register Rsuper = R3_tmp;
4549   const Register Rsub   = R4_tmp;
4550   const Register Rsubtype_check_tmp1 = R1_tmp;
4551   const Register Rsubtype_check_tmp2 = LR_tmp;
4552 
4553   __ cbz(Robj, is_null);
4554 
4555   // Get cpool & tags index
4556   __ get_cpool_and_tags(Rcpool, Rtags);
4557   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4558 
4559   // See if bytecode has already been quicked
4560   __ add(Rtemp, Rtags, Rindex);
4561 #ifdef AARCH64
4562   // TODO-AARCH64: investigate whether a LoadLoad barrier is needed here or a control dependency is enough
4563   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4564   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4565 #else
4566   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4567 #endif // AARCH64
4568 
4569   __ cmp(Rtemp, JVM_CONSTANT_Class);
4570 
4571 #ifndef AARCH64
4572   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4573 #endif // !AARCH64
4574 
4575   __ b(quicked, eq);
4576 
4577   __ push(atos);
4578   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4579   // vm_result_2 has metadata result
4580   __ get_vm_result_2(Rsuper, Robj);
4581   __ pop_ptr(Robj);
4582   __ b(resolved);
4583 
4584   __ bind(throw_exception);
4585   // Come here on failure of subtype check
4586   __ profile_typecheck_failed(R1_tmp);
4587   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4588   __ b(Interpreter::_throw_ClassCastException_entry);
4589 
4590   // Get superklass in Rsuper and subklass in Rsub
4591   __ bind(quicked);
4592   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4593 
4594   __ bind(resolved);
4595   __ load_klass(Rsub, Robj);
4596 
4597   // Generate subtype check. Blows both tmps and Rtemp.
4598   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4599   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4600 
4601   // Come here on success
4602 
4603   // Collect counts on whether this check-cast sees NULLs a lot or not.
4604   if (ProfileInterpreter) {
4605     __ b(done);
4606     __ bind(is_null);
4607     __ profile_null_seen(R1_tmp);
4608   } else {
4609     __ bind(is_null);   // same as 'done'
4610   }
4611   __ bind(done);
4612 }
4613 
4614 
4615 void TemplateTable::instanceof() {
4616   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4617   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4618 
4619   transition(atos, itos);
4620   Label done, is_null, not_subtype, quicked, resolved;
4621 
4622   const Register Robj = R0_tos;
4623   const Register Rcpool = R2_tmp;
4624   const Register Rtags = R3_tmp;
4625   const Register Rindex = R4_tmp;
4626   const Register Rsuper = R3_tmp;
4627   const Register Rsub   = R4_tmp;
4628   const Register Rsubtype_check_tmp1 = R0_tmp;
4629   const Register Rsubtype_check_tmp2 = R1_tmp;
4630 
4631   __ cbz(Robj, is_null);
4632 
4633   __ load_klass(Rsub, Robj);
4634 
4635   // Get cpool & tags index
4636   __ get_cpool_and_tags(Rcpool, Rtags);
4637   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4638 
4639   // See if bytecode has already been quicked
4640   __ add(Rtemp, Rtags, Rindex);
4641 #ifdef AARCH64
4642   // TODO-AARCH64: investigate whether a LoadLoad barrier is needed here or a control dependency is enough
4643   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4644   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4645 #else
4646   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4647 #endif // AARCH64
4648   __ cmp(Rtemp, JVM_CONSTANT_Class);
4649 
4650 #ifndef AARCH64
4651   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4652 #endif // !AARCH64
4653 
4654   __ b(quicked, eq);
4655 
4656   __ push(atos);
4657   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4658   // vm_result_2 has metadata result
4659   __ get_vm_result_2(Rsuper, Robj);
4660   __ pop_ptr(Robj);
4661   __ b(resolved);
4662 
4663   // Get superklass in Rsuper and subklass in Rsub
4664   __ bind(quicked);
4665   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4666 
4667   __ bind(resolved);
4668   __ load_klass(Rsub, Robj);
4669 
4670   // Generate subtype check. Blows both tmps and Rtemp.
4671   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4672 
4673   // Come here on success
4674   __ mov(R0_tos, 1);
4675   __ b(done);
4676 
4677   __ bind(not_subtype);
4678   // Come here on failure
4679   __ profile_typecheck_failed(R1_tmp);
4680   __ mov(R0_tos, 0);
4681 
4682   // Collect counts on whether this test sees NULLs a lot or not.
4683   if (ProfileInterpreter) {
4684     __ b(done);
4685     __ bind(is_null);
4686     __ profile_null_seen(R1_tmp);
4687   } else {
4688     __ bind(is_null);   // same as 'done'
4689   }
4690   __ bind(done);
4691 }
4692 
4693 
4694 //----------------------------------------------------------------------------------------------------
4695 // Breakpoints
4696 void TemplateTable::_breakpoint() {
4697 
4698   // Note: We get here even if we are single stepping.
4699   // jbug insists on setting breakpoints at every bytecode
4700   // even if we are in single step mode.
4701 
4702   transition(vtos, vtos);
4703 
4704   // get the unpatched byte code
4705   __ mov(R1, Rmethod);
4706   __ mov(R2, Rbcp);
4707   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4708 #ifdef AARCH64
4709   __ sxtw(Rtmp_save0, R0);
4710 #else
4711   __ mov(Rtmp_save0, R0);
4712 #endif // AARCH64
4713 
4714   // post the breakpoint event
4715   __ mov(R1, Rmethod);
4716   __ mov(R2, Rbcp);
4717   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4718 
4719   // complete the execution of original bytecode
4720   __ mov(R3_bytecode, Rtmp_save0);
4721   __ dispatch_only_normal(vtos);
4722 }
4723 
4724 
4725 //----------------------------------------------------------------------------------------------------
4726 // Exceptions
4727 
4728 void TemplateTable::athrow() {
4729   transition(atos, vtos);
4730   __ mov(Rexception_obj, R0_tos);
4731   __ null_check(Rexception_obj, Rtemp);
4732   __ b(Interpreter::throw_exception_entry());
4733 }
4734 
4735 
4736 //----------------------------------------------------------------------------------------------------
4737 // Synchronization
4738 //
4739 // Note: monitorenter & exit are symmetric routines; which is reflected
4740 //       in the assembly code structure as well
4741 //
4742 // Stack layout:
4743 //
4744 // [expressions  ] <--- Rstack_top        = expression stack top
4745 // ..
4746 // [expressions  ]
4747 // [monitor entry] <--- monitor block top = expression stack bot
4748 // ..
4749 // [monitor entry]
4750 // [frame data   ] <--- monitor block bot
4751 // ...
4752 // [saved FP     ] <--- FP
4753 
4754 
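// monitorenter scans the monitor block for a free slot or an existing entry for the
// same object; if no free slot is found, the block is grown by one entry and the
// expression stack contents are shifted down by entry_size to make room.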
4755 void TemplateTable::monitorenter() {
4756   transition(atos, vtos);
4757 
4758   const Register Robj = R0_tos;
4759   const Register Rentry = R1_tmp;
4760 
4761   // check for NULL object
4762   __ null_check(Robj, Rtemp);
4763 
4764   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4765   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4766   Label allocate_monitor, allocated;
4767 
4768   // initialize entry pointer
4769   __ mov(Rentry, 0);                             // points to free slot or NULL
4770 
4771   // find a free slot in the monitor block (result in Rentry)
4772   { Label loop, exit;
4773     const Register Rcur = R2_tmp;
4774     const Register Rcur_obj = Rtemp;
4775     const Register Rbottom = R3_tmp;
4776     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4777 
4778     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4779                                  // points to current entry, starting with top-most entry
4780     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4781                                  // points to word before bottom of monitor block
4782 
4783     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4784 #ifndef AARCH64
4785     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4786                                                  // prefetch monitor's object for the first iteration
4787 #endif // !AARCH64
4788     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4789 
4790     __ bind(loop);
4791 #ifdef AARCH64
4792     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4793 #endif // AARCH64
4794     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4795     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4796 
4797     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4798     __ b(exit, eq);                              // if same object then stop searching
4799 
4800     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4801 
4802     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4803 #ifndef AARCH64
4804     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4805                                                  // prefetch monitor's object for the next iteration
4806 #endif // !AARCH64
4807     __ b(loop, ne);                              // if not at bottom then check this entry
4808     __ bind(exit);
4809   }
4810 
4811   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4812 
4813   __ bind(allocate_monitor);
4814 
4815   // allocate one if there's no free slot
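  // Strategy: move Rstack_top and the monitor block top down by entry_size and
  // copy the expression stack words down into the gap; the vacated word(s)
  // just below the old monitor block top become the new monitor entry.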
4816   { Label loop;
4817     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4818 
4819     // 1. compute new pointers
4820 
4821 #ifdef AARCH64
4822     __ check_extended_sp(Rtemp);
4823     __ sub(SP, SP, entry_size);                  // adjust extended SP
4824     __ mov(Rtemp, SP);
4825     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4826 #endif // AARCH64
4827 
4828     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4829                                                  // old monitor block top / expression stack bottom
4830 
4831     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4832     __ check_stack_top_on_expansion();
4833 
4834     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4835 
4836     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4837 
4838     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4839                                                  // set new monitor block top
4840 
4841     // 2. move expression stack contents
4842 
4843     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4844 #ifndef AARCH64
4845     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4846 #endif // !AARCH64
4847     __ b(allocated, eq);
4848 
4849     __ bind(loop);
4850 #ifdef AARCH64
4851     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4852 #endif // AARCH64
4853     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4854                                                             // and advance to next word
4855     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4856 #ifndef AARCH64
4857     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4858 #endif // !AARCH64
4859     __ b(loop, ne);                                         // if not at bottom then copy next word
4860   }
4861 
4862   // call run-time routine
4863 
4864   // Rentry: points to monitor entry
4865   __ bind(allocated);
4866 
4867   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4868   // The object has already been popped from the stack, so the expression stack looks correct.
4869   __ add(Rbcp, Rbcp, 1);
4870 
4871   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4872   __ lock_object(Rentry);
4873 
4874   // check to make sure this monitor doesn't cause stack overflow after locking
4875   __ save_bcp();  // in case of exception
4876   __ arm_stack_overflow_check(0, Rtemp);
4877 
4878   // The bcp has already been incremented. Just need to dispatch to next instruction.
4879   __ dispatch_next(vtos);
4880 }
4881 
4882 
4883 void TemplateTable::monitorexit() {
4884   transition(atos, vtos);
4885 
4886   const Register Robj = R0_tos;
4887   const Register Rcur = R1_tmp;
4888   const Register Rbottom = R2_tmp;
4889   const Register Rcur_obj = Rtemp;
4890 
4891   // check for NULL object
4892   __ null_check(Robj, Rtemp);
4893 
4894   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4895   Label found, throw_exception;
4896 
4897   // find matching slot
4898   { Label loop;
4899     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4900 
4901     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4902                                  // points to current entry, starting with top-most entry
4903     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4904                                  // points to word before bottom of monitor block
4905 
4906     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4907 #ifndef AARCH64
4908     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4909                                                  // prefetch monitor's object for the first iteration
4910 #endif // !AARCH64
4911     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4912 
4913     __ bind(loop);
4914 #ifdef AARCH64
4915     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4916 #endif // AARCH64
4917     // check if current entry is for same object
4918     __ cmp(Rcur_obj, Robj);
4919     __ b(found, eq);                             // if same object then stop searching
4920     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4921     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4922 #ifndef AARCH64
4923     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4924 #endif // !AARCH64
4925     __ b(loop, ne);                              // if not at bottom then check this entry
4926   }
4927 
4928   // error handling. Unlocking was not block-structured
4929   __ bind(throw_exception);
4930   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4931   __ should_not_reach_here();
4932 
4933   // call run-time routine
4934   // Rcur: points to monitor entry
4935   __ bind(found);
4936   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4937   __ unlock_object(Rcur);
4938   __ pop_ptr(Robj);                              // discard object
4939 }
4940 
4941 
4942 //----------------------------------------------------------------------------------------------------
4943 // Wide instructions
4944 
4945 void TemplateTable::wide() {
4946   transition(vtos, vtos);
4947   __ ldrb(R3_bytecode, at_bcp(1));
4948 
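  // Dispatch indirectly through the wide-bytecode entry table: Rtemp receives
  // the address of Interpreter::_wentry_point, and the jump target is loaded
  // from the slot indexed by the bytecode that follows the 'wide' prefix.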
4949   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4950   __ ldr_literal(Rtemp, Ltable);
4951   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4952 
4953   __ nop(); // to avoid filling CPU pipeline with invalid instructions
4954   __ nop();
4955   __ bind_literal(Ltable);
4956 }
4957 
4958 
4959 //----------------------------------------------------------------------------------------------------
4960 // Multi arrays
4961 
4962 void TemplateTable::multianewarray() {
4963   transition(vtos, atos);
4964   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4965 
4966   // last dim is on top of stack; we want address of first one:
4967   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
4968   // (the trailing wordSize is subtracted so that R1 points at the beginning of the dimensions array).
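  // (Illustrative example: with 3 dimensions on 32-bit ARM, where
  // stackElementSize == wordSize == 4, R1 = Rstack_top + 3*4 - 4 = Rstack_top + 8,
  // i.e. the slot holding the first dimension.)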
4969   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4970   __ sub(R1, Rtemp, wordSize);
4971 
4972   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4973   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4974   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4975 }