1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  49   // No ARM-specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 #ifndef AARCH64
  62 static inline Address haddress(int n)            { return iaddress(n + 0); }
  63 #endif // !AARCH64
  64 
  65 static inline Address faddress(int n)            { return iaddress(n); }
  66 static inline Address daddress(int n)            { return laddress(n); }
  67 static inline Address aaddress(int n)            { return iaddress(n); }
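     // Layout implied by the helpers above: locals are word-sized slots
     // addressed relative to Rlocals and laid out at decreasing addresses as
     // the index grows (see get_local_base_addr below). A category-2 value
     // (long/double) occupies two slots, with its low word in the
     // higher-numbered slot, which is why laddress(n) == iaddress(n + 1).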
  68 
  69 
  70 void TemplateTable::get_local_base_addr(Register r, Register index) {
  71   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  72 }
  73 
  74 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  75 #ifdef AARCH64
  76   get_local_base_addr(scratch, index);
  77   return Address(scratch);
  78 #else
  79   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  80 #endif // AARCH64
  81 }
  82 
  83 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  84   return load_iaddress(index, scratch);
  85 }
  86 
  87 Address TemplateTable::load_faddress(Register index, Register scratch) {
  88 #ifdef __SOFTFP__
  89   return load_iaddress(index, scratch);
  90 #else
  91   get_local_base_addr(scratch, index);
  92   return Address(scratch);
  93 #endif // __SOFTFP__
  94 }
  95 
  96 Address TemplateTable::load_daddress(Register index, Register scratch) {
  97   get_local_base_addr(scratch, index);
  98   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  99 }
 100 
 101 // At the top of the Java expression stack, which may be different from SP.
 102 // It isn't for category 1 objects.
 103 static inline Address at_tos() {
 104   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 105 }
 106 
 107 static inline Address at_tos_p1() {
 108   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 109 }
 110 
 111 static inline Address at_tos_p2() {
 112   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 113 }
 114 
 115 
 116 // 32-bit ARM:
 117 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 118 // separate ldr instructions (supports nonadjacent values).
 119 // Used for longs in all modes, and for doubles in SOFTFP mode.
 120 //
 121 // AArch64: loads long local into R0_tos.
 122 //
 123 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 124   const Register Rlocal_base = tmp;
 125   assert_different_registers(Rlocal_index, tmp);
 126 
 127   get_local_base_addr(Rlocal_base, Rlocal_index);
 128 #ifdef AARCH64
 129   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130 #else
 131   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 132   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 133 #endif // AARCH64
 134 }
 135 
 136 
 137 // 32-bit ARM:
 138 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 139 // separate str instructions (supports nonadjacent values).
 140 // Used for longs in all modes, and for doubles in SOFTFP mode.
 141 //
 142 // AArch64: stores R0_tos to long local.
 143 //
 144 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 145   const Register Rlocal_base = tmp;
 146   assert_different_registers(Rlocal_index, tmp);
 147 
 148   get_local_base_addr(Rlocal_base, Rlocal_index);
 149 #ifdef AARCH64
 150   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151 #else
 152   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 153   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 154 #endif // AARCH64
 155 }
 156 
 157 // Returns address of Java array element using temp register as address base.
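     // The returned Address folds in the array header, so the effective address is
     //   array + (index << log2(element_size)) + arrayOopDesc::base_offset_in_bytes(elemType)
     // e.g. for an int[] element: array + 4*index + base_offset_in_bytes(T_INT).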
 158 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 159   int logElemSize = exact_log2(type2aelembytes(elemType));
 160   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 161   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 162 }
 163 
 164 //----------------------------------------------------------------------------------------------------
 165 // Condition conversion
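     // convNegCond returns the ARM condition opposite to the given bytecode
     // condition; the branch templates use it to branch past the taken path
     // when the comparison does not hold.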
 166 AsmCondition convNegCond(TemplateTable::Condition cc) {
 167   switch (cc) {
 168     case TemplateTable::equal        : return ne;
 169     case TemplateTable::not_equal    : return eq;
 170     case TemplateTable::less         : return ge;
 171     case TemplateTable::less_equal   : return gt;
 172     case TemplateTable::greater      : return le;
 173     case TemplateTable::greater_equal: return lt;
 174   }
 175   ShouldNotReachHere();
 176   return nv;
 177 }
 178 
 179 //----------------------------------------------------------------------------------------------------
 180 // Miscellaneous helper routines
 181 
 182 // Store an oop (or NULL) at the address described by obj.
 183 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 184 // Also destroys new_val and obj.base().
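     // do_oop_store (and do_oop_load below) forward to the store_heap_oop /
     // load_heap_oop macro-assembler entry points, which apply whatever GC
     // barriers the given DecoratorSet selects (see barrierSetAssembler.hpp).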
 185 static void do_oop_store(InterpreterMacroAssembler* _masm,
 186                          Address obj,
 187                          Register new_val,
 188                          Register tmp1,
 189                          Register tmp2,
 190                          Register tmp3,
 191                          bool is_null,
 192                          DecoratorSet decorators = 0) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   if (is_null) {
 196     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 197   } else {
 198     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 199   }
 200 }
 201 
 202 static void do_oop_load(InterpreterMacroAssembler* _masm,
 203                         Register dst,
 204                         Address obj,
 205                         DecoratorSet decorators = 0) {
 206   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 207 }
 208 
 209 Address TemplateTable::at_bcp(int offset) {
 210   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 211   return Address(Rbcp, offset);
 212 }
 213 
 214 
 215 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 216 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 217                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 218                                    int byte_no) {
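       // Quickening proceeds in three steps: (1) for the fast put-field variants
       // the patch is skipped while the put_code in the constant pool cache is
       // still zero (see the comment in the switch below); (2) if breakpoints may
       // be set, the rewrite goes through InterpreterRuntime::set_original_bytecode_at
       // so the breakpoint table stays consistent; (3) otherwise the fast bytecode
       // is stored directly at bcp.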
 219   assert_different_registers(bc_reg, temp_reg);
 220   if (!RewriteBytecodes)  return;
 221   Label L_patch_done;
 222 
 223   switch (bc) {
 224   case Bytecodes::_fast_aputfield:
 225   case Bytecodes::_fast_bputfield:
 226   case Bytecodes::_fast_zputfield:
 227   case Bytecodes::_fast_cputfield:
 228   case Bytecodes::_fast_dputfield:
 229   case Bytecodes::_fast_fputfield:
 230   case Bytecodes::_fast_iputfield:
 231   case Bytecodes::_fast_lputfield:
 232   case Bytecodes::_fast_sputfield:
 233     {
 234       // We skip bytecode quickening for putfield instructions when
 235       // the put_code written to the constant pool cache is zero.
 236       // This is required so that every execution of this instruction
 237       // calls out to InterpreterRuntime::resolve_get_put to do
 238       // additional, required work.
 239       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 240       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 241       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 242       __ mov(bc_reg, bc);
 243       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 244     }
 245     break;
 246   default:
 247     assert(byte_no == -1, "sanity");
 248     // the pair bytecodes have already done the load.
 249     if (load_bc_into_bc_reg) {
 250       __ mov(bc_reg, bc);
 251     }
 252   }
 253 
 254   if (__ can_post_breakpoint()) {
 255     Label L_fast_patch;
 256     // if a breakpoint is present we can't rewrite the stream directly
 257     __ ldrb(temp_reg, at_bcp(0));
 258     __ cmp(temp_reg, Bytecodes::_breakpoint);
 259     __ b(L_fast_patch, ne);
 260     if (bc_reg != R3) {
 261       __ mov(R3, bc_reg);
 262     }
 263     __ mov(R1, Rmethod);
 264     __ mov(R2, Rbcp);
 265     // Let breakpoint table handling rewrite to quicker bytecode
 266     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 267     __ b(L_patch_done);
 268     __ bind(L_fast_patch);
 269   }
 270 
 271 #ifdef ASSERT
 272   Label L_okay;
 273   __ ldrb(temp_reg, at_bcp(0));
 274   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 275   __ b(L_okay, eq);
 276   __ cmp(temp_reg, bc_reg);
 277   __ b(L_okay, eq);
 278   __ stop("patching the wrong bytecode");
 279   __ bind(L_okay);
 280 #endif
 281 
 282   // patch bytecode
 283   __ strb(bc_reg, at_bcp(0));
 284   __ bind(L_patch_done);
 285 }
 286 
 287 //----------------------------------------------------------------------------------------------------
 288 // Individual instructions
 289 
 290 void TemplateTable::nop() {
 291   transition(vtos, vtos);
 292   // nothing to do
 293 }
 294 
 295 void TemplateTable::shouldnotreachhere() {
 296   transition(vtos, vtos);
 297   __ stop("shouldnotreachhere bytecode");
 298 }
 299 
 300 
 301 
 302 void TemplateTable::aconst_null() {
 303   transition(vtos, atos);
 304   __ mov(R0_tos, 0);
 305 }
 306 
 307 
 308 void TemplateTable::iconst(int value) {
 309   transition(vtos, itos);
 310   __ mov_slow(R0_tos, value);
 311 }
 312 
 313 
 314 void TemplateTable::lconst(int value) {
 315   transition(vtos, ltos);
 316   assert((value == 0) || (value == 1), "unexpected long constant");
 317   __ mov(R0_tos, value);
 318 #ifndef AARCH64
 319   __ mov(R1_tos_hi, 0);
 320 #endif // !AARCH64
 321 }
 322 
 323 
 324 void TemplateTable::fconst(int value) {
 325   transition(vtos, ftos);
 326 #ifdef AARCH64
 327   switch(value) {
 328   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 329   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 330   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 331   default:  ShouldNotReachHere();      break;
 332   }
 333 #else
 334   const int zero = 0;         // 0.0f
 335   const int one = 0x3f800000; // 1.0f
 336   const int two = 0x40000000; // 2.0f
 337 
 338   switch(value) {
 339   case 0:   __ mov(R0_tos, zero);   break;
 340   case 1:   __ mov(R0_tos, one);    break;
 341   case 2:   __ mov(R0_tos, two);    break;
 342   default:  ShouldNotReachHere();   break;
 343   }
 344 
 345 #ifndef __SOFTFP__
 346   __ fmsr(S0_tos, R0_tos);
 347 #endif // !__SOFTFP__
 348 #endif // AARCH64
 349 }
 350 
 351 
 352 void TemplateTable::dconst(int value) {
 353   transition(vtos, dtos);
 354 #ifdef AARCH64
 355   switch(value) {
 356   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 357   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 358   default:  ShouldNotReachHere();      break;
 359   }
 360 #else
 361   const int one_lo = 0;            // low part of 1.0
 362   const int one_hi = 0x3ff00000;   // high part of 1.0
 363 
 364   if (value == 0) {
 365 #ifdef __SOFTFP__
 366     __ mov(R0_tos_lo, 0);
 367     __ mov(R1_tos_hi, 0);
 368 #else
 369     __ mov(R0_tmp, 0);
 370     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 371 #endif // __SOFTFP__
 372   } else if (value == 1) {
 373     __ mov(R0_tos_lo, one_lo);
 374     __ mov_slow(R1_tos_hi, one_hi);
 375 #ifndef __SOFTFP__
 376     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 377 #endif // !__SOFTFP__
 378   } else {
 379     ShouldNotReachHere();
 380   }
 381 #endif // AARCH64
 382 }
 383 
 384 
 385 void TemplateTable::bipush() {
 386   transition(vtos, itos);
 387   __ ldrsb(R0_tos, at_bcp(1));
 388 }
 389 
 390 
 391 void TemplateTable::sipush() {
 392   transition(vtos, itos);
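       // sipush's operand is a signed 16-bit immediate stored big-endian in the
       // bytecode stream: sign-extend the high byte, zero-extend the low byte,
       // and combine them as (high << 8) | low.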
 393   __ ldrsb(R0_tmp, at_bcp(1));
 394   __ ldrb(R1_tmp, at_bcp(2));
 395   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 396 }
 397 
 398 
 399 void TemplateTable::ldc(bool wide) {
 400   transition(vtos, vtos);
 401   Label fastCase, Done;
 402 
 403   const Register Rindex = R1_tmp;
 404   const Register Rcpool = R2_tmp;
 405   const Register Rtags  = R3_tmp;
 406   const Register RtagType = R3_tmp;
 407 
 408   if (wide) {
 409     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 410   } else {
 411     __ ldrb(Rindex, at_bcp(1));
 412   }
 413   __ get_cpool_and_tags(Rcpool, Rtags);
 414 
 415   const int base_offset = ConstantPool::header_size() * wordSize;
 416   const int tags_offset = Array<u1>::base_offset_in_bytes();
 417 
 418   // get const type
 419   __ add(Rtemp, Rtags, tags_offset);
 420 #ifdef AARCH64
 421   __ add(Rtemp, Rtemp, Rindex);
 422   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether a control dependency is enough
 423 #else
 424   __ ldrb(RtagType, Address(Rtemp, Rindex));
 425   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 426 #endif // AARCH64
 427 
 428   // unresolved class - get the resolved class
 429   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 430 
 431   // unresolved class in error (resolution failed) - call into runtime
 432   // so that the same error from the first resolution attempt is thrown.
 433 #ifdef AARCH64
 434   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 435   __ cond_cmp(RtagType, Rtemp, ne);
 436 #else
 437   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 438 #endif // AARCH64
 439 
 440   // resolved class - need to call vm to get java mirror of the class
 441   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
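       // After the chained compares above, the flags are "eq" iff the tag is one
       // of the three class-related tags, which must take the slow (runtime) path;
       // any other tag branches to the fast case.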
 442 
 443   __ b(fastCase, ne);
 444 
 445   // slow case - call runtime
 446   __ mov(R1, wide);
 447   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 448   __ push(atos);
 449   __ b(Done);
 450 
 451   // int, float, String
 452   __ bind(fastCase);
 453 #ifdef ASSERT
 454   { Label L;
 455     __ cmp(RtagType, JVM_CONSTANT_Integer);
 456     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 457     __ b(L, eq);
 458     __ stop("unexpected tag type in ldc");
 459     __ bind(L);
 460   }
 461 #endif // ASSERT
 462   // itos, ftos
 463   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 464   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 465 
 466   // floats and ints are placed on the stack in the same way, so
 467   // we can use push(itos) to transfer the float value without using VFP
 468   __ push(itos);
 469   __ bind(Done);
 470 }
 471 
 472 // Fast path for caching oop constants.
 473 void TemplateTable::fast_aldc(bool wide) {
 474   transition(vtos, atos);
 475   int index_size = wide ? sizeof(u2) : sizeof(u1);
 476   Label resolved;
 477 
 478   // We are resolved if the resolved reference cache entry contains a
 479   // non-null object (CallSite, etc.)
 480   assert_different_registers(R0_tos, R2_tmp);
 481   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 482   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 483   __ cbnz(R0_tos, resolved);
 484 
 485   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 486 
 487   // first time invocation - must resolve first
 488   __ mov(R1, (int)bytecode());
 489   __ call_VM(R0_tos, entry, R1);
 490   __ bind(resolved);
 491 
 492   if (VerifyOops) {
 493     __ verify_oop(R0_tos);
 494   }
 495 }
 496 
 497 void TemplateTable::ldc2_w() {
 498   transition(vtos, vtos);
 499   const Register Rtags  = R2_tmp;
 500   const Register Rindex = R3_tmp;
 501   const Register Rcpool = R4_tmp;
 502   const Register Rbase  = R5_tmp;
 503 
 504   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 505 
 506   __ get_cpool_and_tags(Rcpool, Rtags);
 507   const int base_offset = ConstantPool::header_size() * wordSize;
 508   const int tags_offset = Array<u1>::base_offset_in_bytes();
 509 
 510   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 511 
 512 #ifdef __ABI_HARD__
 513   Label Long, exit;
 514   // get type from tags
 515   __ add(Rtemp, Rtags, tags_offset);
 516   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 517   __ cmp(Rtemp, JVM_CONSTANT_Double);
 518   __ b(Long, ne);
 519   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 520 
 521   __ push(dtos);
 522   __ b(exit);
 523   __ bind(Long);
 524 #endif
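       // Long constants (and, without the hard-float ABI, double constants as
       // well) are loaded as raw words and pushed as ltos; the bit pattern is
       // transferred unchanged.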
 525 
 526 #ifdef AARCH64
 527   __ ldr(R0_tos, Address(Rbase, base_offset));
 528 #else
 529   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 530   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 531 #endif // AARCH64
 532   __ push(ltos);
 533 
 534 #ifdef __ABI_HARD__
 535   __ bind(exit);
 536 #endif
 537 }
 538 
 539 
 540 void TemplateTable::locals_index(Register reg, int offset) {
 541   __ ldrb(reg, at_bcp(offset));
 542 }
 543 
 544 void TemplateTable::iload() {
 545   iload_internal();
 546 }
 547 
 548 void TemplateTable::nofast_iload() {
 549   iload_internal(may_not_rewrite);
 550 }
 551 
 552 void TemplateTable::iload_internal(RewriteControl rc) {
 553   transition(vtos, itos);
 554 
 555   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 556     Label rewrite, done;
 557     const Register next_bytecode = R1_tmp;
 558     const Register target_bytecode = R2_tmp;
 559 
 560     // get next byte
 561     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 562     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 563     // last two iloads in a pair.  Comparing against fast_iload means that
 564     // the next bytecode is neither an iload nor a caload, and therefore
 565     // an iload pair.
 566     __ cmp(next_bytecode, Bytecodes::_iload);
 567     __ b(done, eq);
 568 
 569     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 570     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 571     __ b(rewrite, eq);
 572 
 573     // if _caload, rewrite to fast_icaload
 574     __ cmp(next_bytecode, Bytecodes::_caload);
 575     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 576     __ b(rewrite, eq);
 577 
 578     // rewrite so iload doesn't check again.
 579     __ mov(target_bytecode, Bytecodes::_fast_iload);
 580 
 581     // rewrite
 582     // R2: fast bytecode
 583     __ bind(rewrite);
 584     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 585     __ bind(done);
 586   }
 587 
 588   // Get the local value into tos
 589   const Register Rlocal_index = R1_tmp;
 590   locals_index(Rlocal_index);
 591   Address local = load_iaddress(Rlocal_index, Rtemp);
 592   __ ldr_s32(R0_tos, local);
 593 }
 594 
 595 
 596 void TemplateTable::fast_iload2() {
 597   transition(vtos, itos);
 598   const Register Rlocal_index = R1_tmp;
 599 
 600   locals_index(Rlocal_index);
 601   Address local = load_iaddress(Rlocal_index, Rtemp);
 602   __ ldr_s32(R0_tos, local);
 603   __ push(itos);
 604 
 605   locals_index(Rlocal_index, 3);
 606   local = load_iaddress(Rlocal_index, Rtemp);
 607   __ ldr_s32(R0_tos, local);
 608 }
 609 
 610 void TemplateTable::fast_iload() {
 611   transition(vtos, itos);
 612   const Register Rlocal_index = R1_tmp;
 613 
 614   locals_index(Rlocal_index);
 615   Address local = load_iaddress(Rlocal_index, Rtemp);
 616   __ ldr_s32(R0_tos, local);
 617 }
 618 
 619 
 620 void TemplateTable::lload() {
 621   transition(vtos, ltos);
 622   const Register Rlocal_index = R2_tmp;
 623 
 624   locals_index(Rlocal_index);
 625   load_category2_local(Rlocal_index, R3_tmp);
 626 }
 627 
 628 
 629 void TemplateTable::fload() {
 630   transition(vtos, ftos);
 631   const Register Rlocal_index = R2_tmp;
 632 
 633   // Get the local value into tos
 634   locals_index(Rlocal_index);
 635   Address local = load_faddress(Rlocal_index, Rtemp);
 636 #ifdef __SOFTFP__
 637   __ ldr(R0_tos, local);
 638 #else
 639   __ ldr_float(S0_tos, local);
 640 #endif // __SOFTFP__
 641 }
 642 
 643 
 644 void TemplateTable::dload() {
 645   transition(vtos, dtos);
 646   const Register Rlocal_index = R2_tmp;
 647 
 648   locals_index(Rlocal_index);
 649 
 650 #ifdef __SOFTFP__
 651   load_category2_local(Rlocal_index, R3_tmp);
 652 #else
 653   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 654 #endif // __SOFTFP__
 655 }
 656 
 657 
 658 void TemplateTable::aload() {
 659   transition(vtos, atos);
 660   const Register Rlocal_index = R1_tmp;
 661 
 662   locals_index(Rlocal_index);
 663   Address local = load_aaddress(Rlocal_index, Rtemp);
 664   __ ldr(R0_tos, local);
 665 }
 666 
 667 
 668 void TemplateTable::locals_index_wide(Register reg) {
 669   assert_different_registers(reg, Rtemp);
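       // the wide index is an unsigned 16-bit operand stored big-endian at
       // bcp + 2 and bcp + 3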
 670   __ ldrb(Rtemp, at_bcp(2));
 671   __ ldrb(reg, at_bcp(3));
 672   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 673 }
 674 
 675 
 676 void TemplateTable::wide_iload() {
 677   transition(vtos, itos);
 678   const Register Rlocal_index = R2_tmp;
 679 
 680   locals_index_wide(Rlocal_index);
 681   Address local = load_iaddress(Rlocal_index, Rtemp);
 682   __ ldr_s32(R0_tos, local);
 683 }
 684 
 685 
 686 void TemplateTable::wide_lload() {
 687   transition(vtos, ltos);
 688   const Register Rlocal_index = R2_tmp;
 689   const Register Rlocal_base = R3_tmp;
 690 
 691   locals_index_wide(Rlocal_index);
 692   load_category2_local(Rlocal_index, R3_tmp);
 693 }
 694 
 695 
 696 void TemplateTable::wide_fload() {
 697   transition(vtos, ftos);
 698   const Register Rlocal_index = R2_tmp;
 699 
 700   locals_index_wide(Rlocal_index);
 701   Address local = load_faddress(Rlocal_index, Rtemp);
 702 #ifdef __SOFTFP__
 703   __ ldr(R0_tos, local);
 704 #else
 705   __ ldr_float(S0_tos, local);
 706 #endif // __SOFTFP__
 707 }
 708 
 709 
 710 void TemplateTable::wide_dload() {
 711   transition(vtos, dtos);
 712   const Register Rlocal_index = R2_tmp;
 713 
 714   locals_index_wide(Rlocal_index);
 715 #ifdef __SOFTFP__
 716   load_category2_local(Rlocal_index, R3_tmp);
 717 #else
 718   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 719 #endif // __SOFTFP__
 720 }
 721 
 722 
 723 void TemplateTable::wide_aload() {
 724   transition(vtos, atos);
 725   const Register Rlocal_index = R2_tmp;
 726 
 727   locals_index_wide(Rlocal_index);
 728   Address local = load_aaddress(Rlocal_index, Rtemp);
 729   __ ldr(R0_tos, local);
 730 }
 731 
 732 void TemplateTable::index_check(Register array, Register index) {
 733   // Pop ptr into array
 734   __ pop_ptr(array);
 735   index_check_without_pop(array, index);
 736 }
 737 
 738 void TemplateTable::index_check_without_pop(Register array, Register index) {
 739   assert_different_registers(array, index, Rtemp);
 740   // check array
 741   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 742   // check index
 743   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 744   __ cmp_32(index, Rtemp);
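       // unsigned 'hs' comparison: a negative index compares as a very large
       // unsigned value, so the same branch also rejects negative indices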
 745   if (index != R4_ArrayIndexOutOfBounds_index) {
 746     // convention with generate_ArrayIndexOutOfBounds_handler()
 747     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 748   }
 749   __ mov(R1, array, hs);
 750   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 751 }
 752 
 753 
 754 void TemplateTable::iaload() {
 755   transition(itos, itos);
 756   const Register Rarray = R1_tmp;
 757   const Register Rindex = R0_tos;
 758 
 759   index_check(Rarray, Rindex);
 760   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 761 }
 762 
 763 
 764 void TemplateTable::laload() {
 765   transition(itos, ltos);
 766   const Register Rarray = R1_tmp;
 767   const Register Rindex = R0_tos;
 768 
 769   index_check(Rarray, Rindex);
 770 
 771 #ifdef AARCH64
 772   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 773 #else
 774   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 775   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 776   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 777 #endif // AARCH64
 778 }
 779 
 780 
 781 void TemplateTable::faload() {
 782   transition(itos, ftos);
 783   const Register Rarray = R1_tmp;
 784   const Register Rindex = R0_tos;
 785 
 786   index_check(Rarray, Rindex);
 787 
 788   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 789 #ifdef __SOFTFP__
 790   __ ldr(R0_tos, addr);
 791 #else
 792   __ ldr_float(S0_tos, addr);
 793 #endif // __SOFTFP__
 794 }
 795 
 796 
 797 void TemplateTable::daload() {
 798   transition(itos, dtos);
 799   const Register Rarray = R1_tmp;
 800   const Register Rindex = R0_tos;
 801 
 802   index_check(Rarray, Rindex);
 803 
 804 #ifdef __SOFTFP__
 805   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 806   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 807   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 808 #else
 809   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 810 #endif // __SOFTFP__
 811 }
 812 
 813 
 814 void TemplateTable::aaload() {
 815   transition(itos, atos);
 816   const Register Rarray = R1_tmp;
 817   const Register Rindex = R0_tos;
 818 
 819   index_check(Rarray, Rindex);
 820   do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
 821 }
 822 
 823 
 824 void TemplateTable::baload() {
 825   transition(itos, itos);
 826   const Register Rarray = R1_tmp;
 827   const Register Rindex = R0_tos;
 828 
 829   index_check(Rarray, Rindex);
 830   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 831 }
 832 
 833 
 834 void TemplateTable::caload() {
 835   transition(itos, itos);
 836   const Register Rarray = R1_tmp;
 837   const Register Rindex = R0_tos;
 838 
 839   index_check(Rarray, Rindex);
 840   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 841 }
 842 
 843 
 844 // iload followed by caload frequent pair
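     // For example, the bytecode pair
     //   iload  <n>
     //   caload
     // is rewritten (see iload_internal above) so that this single template
     // performs both the local load and the char-array element load.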
 845 void TemplateTable::fast_icaload() {
 846   transition(vtos, itos);
 847   const Register Rlocal_index = R1_tmp;
 848   const Register Rarray = R1_tmp;
 849   const Register Rindex = R4_tmp; // index_check prefers index in R4
 850   assert_different_registers(Rlocal_index, Rindex);
 851   assert_different_registers(Rarray, Rindex);
 852 
 853   // load index out of locals
 854   locals_index(Rlocal_index);
 855   Address local = load_iaddress(Rlocal_index, Rtemp);
 856   __ ldr_s32(Rindex, local);
 857 
 858   // get array element
 859   index_check(Rarray, Rindex);
 860   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 861 }
 862 
 863 
 864 void TemplateTable::saload() {
 865   transition(itos, itos);
 866   const Register Rarray = R1_tmp;
 867   const Register Rindex = R0_tos;
 868 
 869   index_check(Rarray, Rindex);
 870   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 871 }
 872 
 873 
 874 void TemplateTable::iload(int n) {
 875   transition(vtos, itos);
 876   __ ldr_s32(R0_tos, iaddress(n));
 877 }
 878 
 879 
 880 void TemplateTable::lload(int n) {
 881   transition(vtos, ltos);
 882 #ifdef AARCH64
 883   __ ldr(R0_tos, laddress(n));
 884 #else
 885   __ ldr(R0_tos_lo, laddress(n));
 886   __ ldr(R1_tos_hi, haddress(n));
 887 #endif // AARCH64
 888 }
 889 
 890 
 891 void TemplateTable::fload(int n) {
 892   transition(vtos, ftos);
 893 #ifdef __SOFTFP__
 894   __ ldr(R0_tos, faddress(n));
 895 #else
 896   __ ldr_float(S0_tos, faddress(n));
 897 #endif // __SOFTFP__
 898 }
 899 
 900 
 901 void TemplateTable::dload(int n) {
 902   transition(vtos, dtos);
 903 #ifdef __SOFTFP__
 904   __ ldr(R0_tos_lo, laddress(n));
 905   __ ldr(R1_tos_hi, haddress(n));
 906 #else
 907   __ ldr_double(D0_tos, daddress(n));
 908 #endif // __SOFTFP__
 909 }
 910 
 911 
 912 void TemplateTable::aload(int n) {
 913   transition(vtos, atos);
 914   __ ldr(R0_tos, aaddress(n));
 915 }
 916 
 917 void TemplateTable::aload_0() {
 918   aload_0_internal();
 919 }
 920 
 921 void TemplateTable::nofast_aload_0() {
 922   aload_0_internal(may_not_rewrite);
 923 }
 924 
 925 void TemplateTable::aload_0_internal(RewriteControl rc) {
 926   transition(vtos, atos);
 927   // According to bytecode histograms, the pairs:
 928   //
 929   // _aload_0, _fast_igetfield
 930   // _aload_0, _fast_agetfield
 931   // _aload_0, _fast_fgetfield
 932   //
 933   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 934   // bytecode checks if the next bytecode is either _fast_igetfield,
 935   // _fast_agetfield or _fast_fgetfield and then rewrites the
 936   // current bytecode into a pair bytecode; otherwise it rewrites the current
 937   // bytecode into _fast_aload_0, which doesn't do the pair check anymore.
 938   //
 939   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 940   //       otherwise we may miss an opportunity for a pair.
 941   //
 942   // Also rewrite frequent pairs
 943   //   aload_0, aload_1
 944   //   aload_0, iload_1
 945   // These bytecodes, which need only a small amount of code, are the most profitable to rewrite
 946   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 947     Label rewrite, done;
 948     const Register next_bytecode = R1_tmp;
 949     const Register target_bytecode = R2_tmp;
 950 
 951     // get next byte
 952     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 953 
 954     // if _getfield then wait with rewrite
 955     __ cmp(next_bytecode, Bytecodes::_getfield);
 956     __ b(done, eq);
 957 
 958     // if _igetfield then rewrite to _fast_iaccess_0
 959     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 960     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
 961     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
 962     __ b(rewrite, eq);
 963 
 964     // if _agetfield then rewrite to _fast_aaccess_0
 965     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 966     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
 967     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
 968     __ b(rewrite, eq);
 969 
 970     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
 971     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 972     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 973 
 974     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
 975 #ifdef AARCH64
 976     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
 977     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
 978     __ mov(target_bytecode, Rtemp, eq);
 979 #else
 980     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
 981     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
 982 #endif // AARCH64
 983 
 984     // rewrite
 985     __ bind(rewrite);
 986     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
 987 
 988     __ bind(done);
 989   }
 990 
 991   aload(0);
 992 }
 993 
 994 void TemplateTable::istore() {
 995   transition(itos, vtos);
 996   const Register Rlocal_index = R2_tmp;
 997 
 998   locals_index(Rlocal_index);
 999   Address local = load_iaddress(Rlocal_index, Rtemp);
1000   __ str_32(R0_tos, local);
1001 }
1002 
1003 
1004 void TemplateTable::lstore() {
1005   transition(ltos, vtos);
1006   const Register Rlocal_index = R2_tmp;
1007 
1008   locals_index(Rlocal_index);
1009   store_category2_local(Rlocal_index, R3_tmp);
1010 }
1011 
1012 
1013 void TemplateTable::fstore() {
1014   transition(ftos, vtos);
1015   const Register Rlocal_index = R2_tmp;
1016 
1017   locals_index(Rlocal_index);
1018   Address local = load_faddress(Rlocal_index, Rtemp);
1019 #ifdef __SOFTFP__
1020   __ str(R0_tos, local);
1021 #else
1022   __ str_float(S0_tos, local);
1023 #endif // __SOFTFP__
1024 }
1025 
1026 
1027 void TemplateTable::dstore() {
1028   transition(dtos, vtos);
1029   const Register Rlocal_index = R2_tmp;
1030 
1031   locals_index(Rlocal_index);
1032 
1033 #ifdef __SOFTFP__
1034   store_category2_local(Rlocal_index, R3_tmp);
1035 #else
1036   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1037 #endif // __SOFTFP__
1038 }
1039 
1040 
1041 void TemplateTable::astore() {
1042   transition(vtos, vtos);
1043   const Register Rlocal_index = R1_tmp;
1044 
1045   __ pop_ptr(R0_tos);
1046   locals_index(Rlocal_index);
1047   Address local = load_aaddress(Rlocal_index, Rtemp);
1048   __ str(R0_tos, local);
1049 }
1050 
1051 
1052 void TemplateTable::wide_istore() {
1053   transition(vtos, vtos);
1054   const Register Rlocal_index = R2_tmp;
1055 
1056   __ pop_i(R0_tos);
1057   locals_index_wide(Rlocal_index);
1058   Address local = load_iaddress(Rlocal_index, Rtemp);
1059   __ str_32(R0_tos, local);
1060 }
1061 
1062 
1063 void TemplateTable::wide_lstore() {
1064   transition(vtos, vtos);
1065   const Register Rlocal_index = R2_tmp;
1066   const Register Rlocal_base = R3_tmp;
1067 
1068 #ifdef AARCH64
1069   __ pop_l(R0_tos);
1070 #else
1071   __ pop_l(R0_tos_lo, R1_tos_hi);
1072 #endif // AARCH64
1073 
1074   locals_index_wide(Rlocal_index);
1075   store_category2_local(Rlocal_index, R3_tmp);
1076 }
1077 
1078 
1079 void TemplateTable::wide_fstore() {
1080   wide_istore();
1081 }
1082 
1083 
1084 void TemplateTable::wide_dstore() {
1085   wide_lstore();
1086 }
1087 
1088 
1089 void TemplateTable::wide_astore() {
1090   transition(vtos, vtos);
1091   const Register Rlocal_index = R2_tmp;
1092 
1093   __ pop_ptr(R0_tos);
1094   locals_index_wide(Rlocal_index);
1095   Address local = load_aaddress(Rlocal_index, Rtemp);
1096   __ str(R0_tos, local);
1097 }
1098 
1099 
1100 void TemplateTable::iastore() {
1101   transition(itos, vtos);
1102   const Register Rindex = R4_tmp; // index_check prefers index in R4
1103   const Register Rarray = R3_tmp;
1104   // R0_tos: value
1105 
1106   __ pop_i(Rindex);
1107   index_check(Rarray, Rindex);
1108   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1109 }
1110 
1111 
1112 void TemplateTable::lastore() {
1113   transition(ltos, vtos);
1114   const Register Rindex = R4_tmp; // index_check prefers index in R4
1115   const Register Rarray = R3_tmp;
1116   // R0_tos_lo:R1_tos_hi: value
1117 
1118   __ pop_i(Rindex);
1119   index_check(Rarray, Rindex);
1120 
1121 #ifdef AARCH64
1122   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1123 #else
1124   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1125   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1126   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1127 #endif // AARCH64
1128 }
1129 
1130 
1131 void TemplateTable::fastore() {
1132   transition(ftos, vtos);
1133   const Register Rindex = R4_tmp; // index_check prefers index in R4
1134   const Register Rarray = R3_tmp;
1135   // S0_tos/R0_tos: value
1136 
1137   __ pop_i(Rindex);
1138   index_check(Rarray, Rindex);
1139   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1140 
1141 #ifdef __SOFTFP__
1142   __ str(R0_tos, addr);
1143 #else
1144   __ str_float(S0_tos, addr);
1145 #endif // __SOFTFP__
1146 }
1147 
1148 
1149 void TemplateTable::dastore() {
1150   transition(dtos, vtos);
1151   const Register Rindex = R4_tmp; // index_check prefers index in R4
1152   const Register Rarray = R3_tmp;
1153   // D0_tos / R0_tos_lo:R1_tos_hi: value
1154 
1155   __ pop_i(Rindex);
1156   index_check(Rarray, Rindex);
1157 
1158 #ifdef __SOFTFP__
1159   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1160   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1161   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1162 #else
1163   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1164 #endif // __SOFTFP__
1165 }
1166 
1167 
1168 void TemplateTable::aastore() {
1169   transition(vtos, vtos);
1170   Label is_null, throw_array_store, done;
1171 
1172   const Register Raddr_1   = R1_tmp;
1173   const Register Rvalue_2  = R2_tmp;
1174   const Register Rarray_3  = R3_tmp;
1175   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1176   const Register Rsub_5    = R5_tmp;
1177   const Register Rsuper_LR = LR_tmp;
1178 
1179   // stack: ..., array, index, value
1180   __ ldr(Rvalue_2, at_tos());     // Value
1181   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1182   __ ldr(Rarray_3, at_tos_p2());  // Array
1183 
1184   index_check_without_pop(Rarray_3, Rindex_4);
1185 
1186   // Compute the array base
1187   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1188 
1189   // do array store check - check for NULL value first
1190   __ cbz(Rvalue_2, is_null);
1191 
1192   // Load subklass
1193   __ load_klass(Rsub_5, Rvalue_2);
1194   // Load superklass
1195   __ load_klass(Rtemp, Rarray_3);
1196   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1197 
1198   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1199   // Come here on success
1200 
1201   // Store value
1202   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1203 
1204   // Now store using the appropriate barrier
1205   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
1206   __ b(done);
1207 
1208   __ bind(throw_array_store);
1209 
1210   // Come here on failure of subtype check
1211   __ profile_typecheck_failed(R0_tmp);
1212 
1213   // object is at TOS
1214   __ b(Interpreter::_throw_ArrayStoreException_entry);
1215 
1216   // Have a NULL in Rvalue_2, store NULL at array[index].
1217   __ bind(is_null);
1218   __ profile_null_seen(R0_tmp);
1219 
1220   // Store a NULL
1221   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
1222 
1223   // Pop stack arguments
1224   __ bind(done);
1225   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1226 }
1227 
1228 
1229 void TemplateTable::bastore() {
1230   transition(itos, vtos);
1231   const Register Rindex = R4_tmp; // index_check prefers index in R4
1232   const Register Rarray = R3_tmp;
1233   // R0_tos: value
1234 
1235   __ pop_i(Rindex);
1236   index_check(Rarray, Rindex);
1237 
1238   // Need to check whether array is boolean or byte
1239   // since both types share the bastore bytecode.
1240   __ load_klass(Rtemp, Rarray);
1241   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1242   Label L_skip;
1243   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1244   __ b(L_skip, eq);
1245   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1246   __ bind(L_skip);
1247   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1248 }
1249 
1250 
1251 void TemplateTable::castore() {
1252   transition(itos, vtos);
1253   const Register Rindex = R4_tmp; // index_check prefers index in R4
1254   const Register Rarray = R3_tmp;
1255   // R0_tos: value
1256 
1257   __ pop_i(Rindex);
1258   index_check(Rarray, Rindex);
1259 
1260   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1261 }
1262 
1263 
1264 void TemplateTable::sastore() {
1265   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1266            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1267          "base offsets for char and short should be equal");
1268   castore();
1269 }
1270 
1271 
1272 void TemplateTable::istore(int n) {
1273   transition(itos, vtos);
1274   __ str_32(R0_tos, iaddress(n));
1275 }
1276 
1277 
1278 void TemplateTable::lstore(int n) {
1279   transition(ltos, vtos);
1280 #ifdef AARCH64
1281   __ str(R0_tos, laddress(n));
1282 #else
1283   __ str(R0_tos_lo, laddress(n));
1284   __ str(R1_tos_hi, haddress(n));
1285 #endif // AARCH64
1286 }
1287 
1288 
1289 void TemplateTable::fstore(int n) {
1290   transition(ftos, vtos);
1291 #ifdef __SOFTFP__
1292   __ str(R0_tos, faddress(n));
1293 #else
1294   __ str_float(S0_tos, faddress(n));
1295 #endif // __SOFTFP__
1296 }
1297 
1298 
1299 void TemplateTable::dstore(int n) {
1300   transition(dtos, vtos);
1301 #ifdef __SOFTFP__
1302   __ str(R0_tos_lo, laddress(n));
1303   __ str(R1_tos_hi, haddress(n));
1304 #else
1305   __ str_double(D0_tos, daddress(n));
1306 #endif // __SOFTFP__
1307 }
1308 
1309 
1310 void TemplateTable::astore(int n) {
1311   transition(vtos, vtos);
1312   __ pop_ptr(R0_tos);
1313   __ str(R0_tos, aaddress(n));
1314 }
1315 
1316 
1317 void TemplateTable::pop() {
1318   transition(vtos, vtos);
1319   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1320 }
1321 
1322 
1323 void TemplateTable::pop2() {
1324   transition(vtos, vtos);
1325   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1326 }
1327 
1328 
1329 void TemplateTable::dup() {
1330   transition(vtos, vtos);
1331   // stack: ..., a
1332   __ load_ptr(0, R0_tmp);
1333   __ push_ptr(R0_tmp);
1334   // stack: ..., a, a
1335 }
1336 
1337 
1338 void TemplateTable::dup_x1() {
1339   transition(vtos, vtos);
1340   // stack: ..., a, b
1341   __ load_ptr(0, R0_tmp);  // load b
1342   __ load_ptr(1, R2_tmp);  // load a
1343   __ store_ptr(1, R0_tmp); // store b
1344   __ store_ptr(0, R2_tmp); // store a
1345   __ push_ptr(R0_tmp);     // push b
1346   // stack: ..., b, a, b
1347 }
1348 
1349 
1350 void TemplateTable::dup_x2() {
1351   transition(vtos, vtos);
1352   // stack: ..., a, b, c
1353   __ load_ptr(0, R0_tmp);   // load c
1354   __ load_ptr(1, R2_tmp);   // load b
1355   __ load_ptr(2, R4_tmp);   // load a
1356 
1357   __ push_ptr(R0_tmp);      // push c
1358 
1359   // stack: ..., a, b, c, c
1360   __ store_ptr(1, R2_tmp);  // store b
1361   __ store_ptr(2, R4_tmp);  // store a
1362   __ store_ptr(3, R0_tmp);  // store c
1363   // stack: ..., c, a, b, c
1364 }
1365 
1366 
1367 void TemplateTable::dup2() {
1368   transition(vtos, vtos);
1369   // stack: ..., a, b
1370   __ load_ptr(1, R0_tmp);  // load a
1371   __ push_ptr(R0_tmp);     // push a
1372   __ load_ptr(1, R0_tmp);  // load b
1373   __ push_ptr(R0_tmp);     // push b
1374   // stack: ..., a, b, a, b
1375 }
1376 
1377 
1378 void TemplateTable::dup2_x1() {
1379   transition(vtos, vtos);
1380 
1381   // stack: ..., a, b, c
1382   __ load_ptr(0, R4_tmp);  // load c
1383   __ load_ptr(1, R2_tmp);  // load b
1384   __ load_ptr(2, R0_tmp);  // load a
1385 
1386   __ push_ptr(R2_tmp);     // push b
1387   __ push_ptr(R4_tmp);     // push c
1388 
1389   // stack: ..., a, b, c, b, c
1390 
1391   __ store_ptr(2, R0_tmp);  // store a
1392   __ store_ptr(3, R4_tmp);  // store c
1393   __ store_ptr(4, R2_tmp);  // store b
1394 
1395   // stack: ..., b, c, a, b, c
1396 }
1397 
1398 
1399 void TemplateTable::dup2_x2() {
1400   transition(vtos, vtos);
1401   // stack: ..., a, b, c, d
1402   __ load_ptr(0, R0_tmp);  // load d
1403   __ load_ptr(1, R2_tmp);  // load c
1404   __ push_ptr(R2_tmp);     // push c
1405   __ push_ptr(R0_tmp);     // push d
1406   // stack: ..., a, b, c, d, c, d
1407   __ load_ptr(4, R4_tmp);  // load b
1408   __ store_ptr(4, R0_tmp); // store d in b
1409   __ store_ptr(2, R4_tmp); // store b in d
1410   // stack: ..., a, d, c, b, c, d
1411   __ load_ptr(5, R4_tmp);  // load a
1412   __ store_ptr(5, R2_tmp); // store c in a
1413   __ store_ptr(3, R4_tmp); // store a in c
1414   // stack: ..., c, d, a, b, c, d
1415 }
1416 
1417 
1418 void TemplateTable::swap() {
1419   transition(vtos, vtos);
1420   // stack: ..., a, b
1421   __ load_ptr(1, R0_tmp);  // load a
1422   __ load_ptr(0, R2_tmp);  // load b
1423   __ store_ptr(0, R0_tmp); // store a in b
1424   __ store_ptr(1, R2_tmp); // store b in a
1425   // stack: ..., b, a
1426 }
1427 
1428 
1429 void TemplateTable::iop2(Operation op) {
1430   transition(itos, itos);
1431   const Register arg1 = R1_tmp;
1432   const Register arg2 = R0_tos;
1433 
1434   __ pop_i(arg1);
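       // For the shift cases only the low five bits of the count are significant,
       // as the JLS requires: the AArch64 lslv/asrv/lsrv W-forms mask the count
       // themselves, while on 32-bit ARM a register-specified shift uses the low
       // byte of the register, so the count is masked with 0x1f explicitly.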
1435   switch (op) {
1436     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1437     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1438     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1439     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1440     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1441     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1442 #ifdef AARCH64
1443     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1444     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1445     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1446 #else
1447     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1448     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1449     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1450 #endif // AARCH64
1451     default   : ShouldNotReachHere();
1452   }
1453 }
1454 
1455 
1456 void TemplateTable::lop2(Operation op) {
1457   transition(ltos, ltos);
1458 #ifdef AARCH64
1459   const Register arg1 = R1_tmp;
1460   const Register arg2 = R0_tos;
1461 
1462   __ pop_l(arg1);
1463   switch (op) {
1464     case add  : __ add (R0_tos, arg1, arg2); break;
1465     case sub  : __ sub (R0_tos, arg1, arg2); break;
1466     case _and : __ andr(R0_tos, arg1, arg2); break;
1467     case _or  : __ orr (R0_tos, arg1, arg2); break;
1468     case _xor : __ eor (R0_tos, arg1, arg2); break;
1469     default   : ShouldNotReachHere();
1470   }
1471 #else
1472   const Register arg1_lo = R2_tmp;
1473   const Register arg1_hi = R3_tmp;
1474   const Register arg2_lo = R0_tos_lo;
1475   const Register arg2_hi = R1_tos_hi;
1476 
1477   __ pop_l(arg1_lo, arg1_hi);
1478   switch (op) {
1479     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1480     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1481     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1482     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1483     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1484     default : ShouldNotReachHere();
1485   }
1486 #endif // AARCH64
1487 }
1488 
1489 
1490 void TemplateTable::idiv() {
1491   transition(itos, itos);
1492 #ifdef AARCH64
1493   const Register divisor = R0_tos;
1494   const Register dividend = R1_tmp;
1495 
1496   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1497   __ pop_i(dividend);
1498   __ sdiv_w(R0_tos, dividend, divisor);
1499 #else
1500   __ mov(R2, R0_tos);
1501   __ pop_i(R0);
1502   // R0 - dividend
1503   // R2 - divisor
1504   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1505   // R1 - result
1506   __ mov(R0_tos, R1);
1507 #endif // AARCH64
1508 }
1509 
1510 
1511 void TemplateTable::irem() {
1512   transition(itos, itos);
1513 #ifdef AARCH64
1514   const Register divisor = R0_tos;
1515   const Register dividend = R1_tmp;
1516   const Register quotient = R2_tmp;
1517 
1518   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1519   __ pop_i(dividend);
1520   __ sdiv_w(quotient, dividend, divisor);
1521   __ msub_w(R0_tos, divisor, quotient, dividend);
1522 #else
1523   __ mov(R2, R0_tos);
1524   __ pop_i(R0);
1525   // R0 - dividend
1526   // R2 - divisor
1527   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1528   // R0 - remainder
1529 #endif // AARCH64
1530 }
1531 
1532 
1533 void TemplateTable::lmul() {
1534   transition(ltos, ltos);
1535 #ifdef AARCH64
1536   const Register arg1 = R0_tos;
1537   const Register arg2 = R1_tmp;
1538 
1539   __ pop_l(arg2);
1540   __ mul(R0_tos, arg1, arg2);
1541 #else
1542   const Register arg1_lo = R0_tos_lo;
1543   const Register arg1_hi = R1_tos_hi;
1544   const Register arg2_lo = R2_tmp;
1545   const Register arg2_hi = R3_tmp;
1546 
1547   __ pop_l(arg2_lo, arg2_hi);
1548 
1549   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1550 #endif // AARCH64
1551 }
1552 
1553 
1554 void TemplateTable::ldiv() {
1555   transition(ltos, ltos);
1556 #ifdef AARCH64
1557   const Register divisor = R0_tos;
1558   const Register dividend = R1_tmp;
1559 
1560   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1561   __ pop_l(dividend);
1562   __ sdiv(R0_tos, dividend, divisor);
1563 #else
1564   const Register x_lo = R2_tmp;
1565   const Register x_hi = R3_tmp;
1566   const Register y_lo = R0_tos_lo;
1567   const Register y_hi = R1_tos_hi;
1568 
1569   __ pop_l(x_lo, x_hi);
1570 
1571   // check if y = 0
1572   __ orrs(Rtemp, y_lo, y_hi);
1573   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1574   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1575 #endif // AARCH64
1576 }
1577 
1578 
1579 void TemplateTable::lrem() {
1580   transition(ltos, ltos);
1581 #ifdef AARCH64
1582   const Register divisor = R0_tos;
1583   const Register dividend = R1_tmp;
1584   const Register quotient = R2_tmp;
1585 
1586   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1587   __ pop_l(dividend);
1588   __ sdiv(quotient, dividend, divisor);
1589   __ msub(R0_tos, divisor, quotient, dividend);
1590 #else
1591   const Register x_lo = R2_tmp;
1592   const Register x_hi = R3_tmp;
1593   const Register y_lo = R0_tos_lo;
1594   const Register y_hi = R1_tos_hi;
1595 
1596   __ pop_l(x_lo, x_hi);
1597 
1598   // check if y = 0
1599   __ orrs(Rtemp, y_lo, y_hi);
1600   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1601   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1602 #endif // AARCH64
1603 }
1604 
1605 
1606 void TemplateTable::lshl() {
1607   transition(itos, ltos);
1608 #ifdef AARCH64
1609   const Register val = R1_tmp;
1610   const Register shift_cnt = R0_tos;
1611   __ pop_l(val);
1612   __ lslv(R0_tos, val, shift_cnt);
1613 #else
1614   const Register shift_cnt = R4_tmp;
1615   const Register val_lo = R2_tmp;
1616   const Register val_hi = R3_tmp;
1617 
1618   __ pop_l(val_lo, val_hi);
1619   __ andr(shift_cnt, R0_tos, 63);
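       // only the low six bits of the shift count are significant for long shifts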
1620   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1621 #endif // AARCH64
1622 }
1623 
1624 
1625 void TemplateTable::lshr() {
1626   transition(itos, ltos);
1627 #ifdef AARCH64
1628   const Register val = R1_tmp;
1629   const Register shift_cnt = R0_tos;
1630   __ pop_l(val);
1631   __ asrv(R0_tos, val, shift_cnt);
1632 #else
1633   const Register shift_cnt = R4_tmp;
1634   const Register val_lo = R2_tmp;
1635   const Register val_hi = R3_tmp;
1636 
1637   __ pop_l(val_lo, val_hi);
1638   __ andr(shift_cnt, R0_tos, 63);
1639   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1640 #endif // AARCH64
1641 }
1642 
1643 
1644 void TemplateTable::lushr() {
1645   transition(itos, ltos);
1646 #ifdef AARCH64
1647   const Register val = R1_tmp;
1648   const Register shift_cnt = R0_tos;
1649   __ pop_l(val);
1650   __ lsrv(R0_tos, val, shift_cnt);
1651 #else
1652   const Register shift_cnt = R4_tmp;
1653   const Register val_lo = R2_tmp;
1654   const Register val_hi = R3_tmp;
1655 
1656   __ pop_l(val_lo, val_hi);
1657   __ andr(shift_cnt, R0_tos, 63);
1658   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1659 #endif // AARCH64
1660 }
1661 
1662 
1663 void TemplateTable::fop2(Operation op) {
1664   transition(ftos, ftos);
1665 #ifdef __SOFTFP__
1666   __ mov(R1, R0_tos);
1667   __ pop_i(R0);
1668   switch (op) {
1669     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1670     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1671     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1672     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1673     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1674     default : ShouldNotReachHere();
1675   }
1676 #else
1677   const FloatRegister arg1 = S1_tmp;
1678   const FloatRegister arg2 = S0_tos;
1679 
1680   switch (op) {
1681     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1682     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1683     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1684     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1685     case rem:
1686 #ifndef __ABI_HARD__
1687       __ pop_f(arg1);
1688       __ fmrs(R0, arg1);
1689       __ fmrs(R1, arg2);
1690       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1691       __ fmsr(S0_tos, R0);
1692 #else
1693       __ mov_float(S1_reg, arg2);
1694       __ pop_f(S0);
1695       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1696 #endif // !__ABI_HARD__
1697       break;
1698     default : ShouldNotReachHere();
1699   }
1700 #endif // __SOFTFP__
1701 }
1702 
1703 
1704 void TemplateTable::dop2(Operation op) {
1705   transition(dtos, dtos);
1706 #ifdef __SOFTFP__
1707   __ mov(R2, R0_tos_lo);
1708   __ mov(R3, R1_tos_hi);
1709   __ pop_l(R0, R1);
1710   switch (op) {
1711     // __aeabi_XXXX_glibc: code imported from the glibc soft-fp bundle to improve calculation accuracy. See CR 6757269.
1712     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1713     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1714     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1715     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1716     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1717     default : ShouldNotReachHere();
1718   }
1719 #else
1720   const FloatRegister arg1 = D1_tmp;
1721   const FloatRegister arg2 = D0_tos;
1722 
1723   switch (op) {
1724     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1725     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1726     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1727     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1728     case rem:
1729 #ifndef __ABI_HARD__
1730       __ pop_d(arg1);
1731       __ fmrrd(R0, R1, arg1);
1732       __ fmrrd(R2, R3, arg2);
1733       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1734       __ fmdrr(D0_tos, R0, R1);
1735 #else
1736       __ mov_double(D1, arg2);
1737       __ pop_d(D0);
1738       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1739 #endif // !__ABI_HARD__
1740       break;
1741     default : ShouldNotReachHere();
1742   }
1743 #endif // __SOFTFP__
1744 }
1745 
1746 
1747 void TemplateTable::ineg() {
1748   transition(itos, itos);
1749   __ neg_32(R0_tos, R0_tos);
1750 }
1751 
1752 
1753 void TemplateTable::lneg() {
1754   transition(ltos, ltos);
1755 #ifdef AARCH64
1756   __ neg(R0_tos, R0_tos);
1757 #else
1758   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1759   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1760 #endif // AARCH64
1761 }
1762 
1763 
1764 void TemplateTable::fneg() {
1765   transition(ftos, ftos);
1766 #ifdef __SOFTFP__
1767   // Invert sign bit
1768   const int sign_mask = 0x80000000;
1769   __ eor(R0_tos, R0_tos, sign_mask);
1770 #else
1771   __ neg_float(S0_tos, S0_tos);
1772 #endif // __SOFTFP__
1773 }
1774 
1775 
1776 void TemplateTable::dneg() {
1777   transition(dtos, dtos);
1778 #ifdef __SOFTFP__
1779   // Invert sign bit in the high part of the double
1780   const int sign_mask_hi = 0x80000000;
1781   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1782 #else
1783   __ neg_double(D0_tos, D0_tos);
1784 #endif // __SOFTFP__
1785 }
1786 
1787 
1788 void TemplateTable::iinc() {
1789   transition(vtos, vtos);
1790   const Register Rconst = R2_tmp;
1791   const Register Rlocal_index = R1_tmp;
1792   const Register Rval = R0_tmp;
1793 
1794   __ ldrsb(Rconst, at_bcp(2));
1795   locals_index(Rlocal_index);
1796   Address local = load_iaddress(Rlocal_index, Rtemp);
1797   __ ldr_s32(Rval, local);
1798   __ add(Rval, Rval, Rconst);
1799   __ str_32(Rval, local);
1800 }
1801 
1802 
1803 void TemplateTable::wide_iinc() {
1804   transition(vtos, vtos);
1805   const Register Rconst = R2_tmp;
1806   const Register Rlocal_index = R1_tmp;
1807   const Register Rval = R0_tmp;
1808 
1809   // get constant in Rconst
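       // (wide iinc encodes a signed 16-bit increment, big-endian:
       //  high byte at bcp+4, sign-extended, low byte at bcp+5)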
1810   __ ldrsb(R2_tmp, at_bcp(4));
1811   __ ldrb(R3_tmp, at_bcp(5));
1812   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1813 
1814   locals_index_wide(Rlocal_index);
1815   Address local = load_iaddress(Rlocal_index, Rtemp);
1816   __ ldr_s32(Rval, local);
1817   __ add(Rval, Rval, Rconst);
1818   __ str_32(Rval, local);
1819 }
1820 
1821 
1822 void TemplateTable::convert() {
1823   // Checking
1824 #ifdef ASSERT
1825   { TosState tos_in  = ilgl;
1826     TosState tos_out = ilgl;
1827     switch (bytecode()) {
1828       case Bytecodes::_i2l: // fall through
1829       case Bytecodes::_i2f: // fall through
1830       case Bytecodes::_i2d: // fall through
1831       case Bytecodes::_i2b: // fall through
1832       case Bytecodes::_i2c: // fall through
1833       case Bytecodes::_i2s: tos_in = itos; break;
1834       case Bytecodes::_l2i: // fall through
1835       case Bytecodes::_l2f: // fall through
1836       case Bytecodes::_l2d: tos_in = ltos; break;
1837       case Bytecodes::_f2i: // fall through
1838       case Bytecodes::_f2l: // fall through
1839       case Bytecodes::_f2d: tos_in = ftos; break;
1840       case Bytecodes::_d2i: // fall through
1841       case Bytecodes::_d2l: // fall through
1842       case Bytecodes::_d2f: tos_in = dtos; break;
1843       default             : ShouldNotReachHere();
1844     }
1845     switch (bytecode()) {
1846       case Bytecodes::_l2i: // fall through
1847       case Bytecodes::_f2i: // fall through
1848       case Bytecodes::_d2i: // fall through
1849       case Bytecodes::_i2b: // fall through
1850       case Bytecodes::_i2c: // fall through
1851       case Bytecodes::_i2s: tos_out = itos; break;
1852       case Bytecodes::_i2l: // fall through
1853       case Bytecodes::_f2l: // fall through
1854       case Bytecodes::_d2l: tos_out = ltos; break;
1855       case Bytecodes::_i2f: // fall through
1856       case Bytecodes::_l2f: // fall through
1857       case Bytecodes::_d2f: tos_out = ftos; break;
1858       case Bytecodes::_i2d: // fall through
1859       case Bytecodes::_l2d: // fall through
1860       case Bytecodes::_f2d: tos_out = dtos; break;
1861       default             : ShouldNotReachHere();
1862     }
1863     transition(tos_in, tos_out);
1864   }
1865 #endif // ASSERT
1866 
1867   // Conversion
1868   switch (bytecode()) {
1869     case Bytecodes::_i2l:
1870 #ifdef AARCH64
1871       __ sign_extend(R0_tos, R0_tos, 32);
1872 #else
1873       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1874 #endif // AARCH64
1875       break;
1876 
1877     case Bytecodes::_i2f:
1878 #ifdef AARCH64
1879       __ scvtf_sw(S0_tos, R0_tos);
1880 #else
1881 #ifdef __SOFTFP__
1882       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1883 #else
1884       __ fmsr(S0_tmp, R0_tos);
1885       __ fsitos(S0_tos, S0_tmp);
1886 #endif // __SOFTFP__
1887 #endif // AARCH64
1888       break;
1889 
1890     case Bytecodes::_i2d:
1891 #ifdef AARCH64
1892       __ scvtf_dw(D0_tos, R0_tos);
1893 #else
1894 #ifdef __SOFTFP__
1895       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1896 #else
1897       __ fmsr(S0_tmp, R0_tos);
1898       __ fsitod(D0_tos, S0_tmp);
1899 #endif // __SOFTFP__
1900 #endif // AARCH64
1901       break;
1902 
1903     case Bytecodes::_i2b:
1904       __ sign_extend(R0_tos, R0_tos, 8);
1905       break;
1906 
1907     case Bytecodes::_i2c:
1908       __ zero_extend(R0_tos, R0_tos, 16);
1909       break;
1910 
1911     case Bytecodes::_i2s:
1912       __ sign_extend(R0_tos, R0_tos, 16);
1913       break;
1914 
1915     case Bytecodes::_l2i:
1916       /* nothing to do */
1917       break;
1918 
1919     case Bytecodes::_l2f:
1920 #ifdef AARCH64
1921       __ scvtf_sx(S0_tos, R0_tos);
1922 #else
1923       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1924 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1925       __ fmsr(S0_tos, R0);
1926 #endif // !__SOFTFP__ && !__ABI_HARD__
1927 #endif // AARCH64
1928       break;
1929 
1930     case Bytecodes::_l2d:
1931 #ifdef AARCH64
1932       __ scvtf_dx(D0_tos, R0_tos);
1933 #else
1934       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1935 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1936       __ fmdrr(D0_tos, R0, R1);
1937 #endif // !__SOFTFP__ && !__ABI_HARD__
1938 #endif // AARCH64
1939       break;
1940 
1941     case Bytecodes::_f2i:
1942 #ifdef AARCH64
1943       __ fcvtzs_ws(R0_tos, S0_tos);
1944 #else
1945 #ifndef __SOFTFP__
1946       __ ftosizs(S0_tos, S0_tos);
1947       __ fmrs(R0_tos, S0_tos);
1948 #else
1949       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1950 #endif // !__SOFTFP__
1951 #endif // AARCH64
1952       break;
1953 
1954     case Bytecodes::_f2l:
1955 #ifdef AARCH64
1956       __ fcvtzs_xs(R0_tos, S0_tos);
1957 #else
1958 #ifndef __SOFTFP__
1959       __ fmrs(R0_tos, S0_tos);
1960 #endif // !__SOFTFP__
1961       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1962 #endif // AARCH64
1963       break;
1964 
1965     case Bytecodes::_f2d:
1966 #ifdef __SOFTFP__
1967       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1968 #else
1969       __ convert_f2d(D0_tos, S0_tos);
1970 #endif // __SOFTFP__
1971       break;
1972 
1973     case Bytecodes::_d2i:
1974 #ifdef AARCH64
1975       __ fcvtzs_wd(R0_tos, D0_tos);
1976 #else
1977 #ifndef __SOFTFP__
1978       __ ftosizd(Stemp, D0);
1979       __ fmrs(R0, Stemp);
1980 #else
1981       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1982 #endif // !__SOFTFP__
1983 #endif // AARCH64
1984       break;
1985 
1986     case Bytecodes::_d2l:
1987 #ifdef AARCH64
1988       __ fcvtzs_xd(R0_tos, D0_tos);
1989 #else
1990 #ifndef __SOFTFP__
1991       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1992 #endif // !__SOFTFP__
1993       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1994 #endif // AARCH64
1995       break;
1996 
1997     case Bytecodes::_d2f:
1998 #ifdef __SOFTFP__
1999       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2000 #else
2001       __ convert_d2f(S0_tos, D0_tos);
2002 #endif // __SOFTFP__
2003       break;
2004 
2005     default:
2006       ShouldNotReachHere();
2007   }
2008 }
2009 
2010 
2011 void TemplateTable::lcmp() {
2012   transition(ltos, itos);
2013 #ifdef AARCH64
2014   const Register arg1 = R1_tmp;
2015   const Register arg2 = R0_tos;
2016 
2017   __ pop_l(arg1);
2018 
2019   __ cmp(arg1, arg2);
2020   __ cset(R0_tos, gt);               // 1 if '>', else 0
2021   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2022 #else
2023   const Register arg1_lo = R2_tmp;
2024   const Register arg1_hi = R3_tmp;
2025   const Register arg2_lo = R0_tos_lo;
2026   const Register arg2_hi = R1_tos_hi;
2027   const Register res = R4_tmp;
2028 
2029   __ pop_l(arg1_lo, arg1_hi);
2030 
2031   // long compare arg1 with arg2
2032   // result is -1/0/+1 if '<'/'='/'>'
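       // i.e. roughly:
       //   if (arg1_hi != arg2_hi) res = (arg1_hi < arg2_hi) ? -1 : 1;            // signed compare of high words
       //   else res = (arg1_lo == arg2_lo) ? 0 : (arg1_lo < arg2_lo ? -1 : 1);    // unsigned compare of low words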
2033   Label done;
2034 
2035   __ mov (res, 0);
2036   __ cmp (arg1_hi, arg2_hi);
2037   __ mvn (res, 0, lt);
2038   __ mov (res, 1, gt);
2039   __ b(done, ne);
2040   __ cmp (arg1_lo, arg2_lo);
2041   __ mvn (res, 0, lo);
2042   __ mov (res, 1, hi);
2043   __ bind(done);
2044   __ mov (R0_tos, res);
2045 #endif // AARCH64
2046 }
2047 
2048 
2049 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2050   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2051 
2052 #ifdef AARCH64
2053   if (is_float) {
2054     transition(ftos, itos);
2055     __ pop_f(S1_tmp);
2056     __ fcmp_s(S1_tmp, S0_tos);
2057   } else {
2058     transition(dtos, itos);
2059     __ pop_d(D1_tmp);
2060     __ fcmp_d(D1_tmp, D0_tos);
2061   }
2062 
2063   if (unordered_result < 0) {
2064     __ cset(R0_tos, gt);               // 1 if '>', else 0
2065     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2066   } else {
2067     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2068     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2069   }
2070 
2071 #else
2072 
2073 #ifdef __SOFTFP__
2074 
2075   if (is_float) {
2076     transition(ftos, itos);
2077     const Register Rx = R0;
2078     const Register Ry = R1;
2079 
2080     __ mov(Ry, R0_tos);
2081     __ pop_i(Rx);
2082 
2083     if (unordered_result == 1) {
2084       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2085     } else {
2086       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2087     }
2088 
2089   } else {
2090 
2091     transition(dtos, itos);
2092     const Register Rx_lo = R0;
2093     const Register Rx_hi = R1;
2094     const Register Ry_lo = R2;
2095     const Register Ry_hi = R3;
2096 
2097     __ mov(Ry_lo, R0_tos_lo);
2098     __ mov(Ry_hi, R1_tos_hi);
2099     __ pop_l(Rx_lo, Rx_hi);
2100 
2101     if (unordered_result == 1) {
2102       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2103     } else {
2104       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2105     }
2106   }
2107 
2108 #else
2109 
2110   if (is_float) {
2111     transition(ftos, itos);
2112     __ pop_f(S1_tmp);
2113     __ fcmps(S1_tmp, S0_tos);
2114   } else {
2115     transition(dtos, itos);
2116     __ pop_d(D1_tmp);
2117     __ fcmpd(D1_tmp, D0_tos);
2118   }
2119 
2120   __ fmstat();
2121 
2122   // comparison result | flag N | flag Z | flag C | flag V
2123   // "<"               |   1    |   0    |   0    |   0
2124   // "=="              |   0    |   1    |   1    |   0
2125   // ">"               |   0    |   0    |   1    |   0
2126   // unordered         |   0    |   0    |   1    |   1
2127 
2128   if (unordered_result < 0) {
2129     __ mov(R0_tos, 1);           // result ==  1 if greater
2130     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2131   } else {
2132     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2133     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2134   }
2135   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2136 #endif // __SOFTFP__
2137 #endif // AARCH64
2138 }
2139 
2140 
2141 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2142 
2143   const Register Rdisp = R0_tmp;
2144   const Register Rbumped_taken_count = R5_tmp;
2145 
2146   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2147 
2148   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2149                              InvocationCounter::counter_offset();
2150   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2151                               InvocationCounter::counter_offset();
2152   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2153 
2154   // Load up R0 with the branch displacement
2155   if (is_wide) {
2156     __ ldrsb(R0_tmp, at_bcp(1));
2157     __ ldrb(R1_tmp, at_bcp(2));
2158     __ ldrb(R2_tmp, at_bcp(3));
2159     __ ldrb(R3_tmp, at_bcp(4));
2160     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2161     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2162     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2163   } else {
2164     __ ldrsb(R0_tmp, at_bcp(1));
2165     __ ldrb(R1_tmp, at_bcp(2));
2166     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2167   }
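       // Rdisp now holds the signed branch displacement taken from the bytecode
       // stream: big-endian, 2 bytes at bcp+1..2, or 4 bytes at bcp+1..4 for wide.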
2168 
2169   // Handle all the JSR stuff here, then exit.
2170   // It's much shorter and cleaner than intermingling with the
2171   // non-JSR normal-branch stuff occurring below.
2172   if (is_jsr) {
2173     // compute return address as bci in R1
2174     const Register Rret_addr = R1_tmp;
2175     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2176 
2177     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2178     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2179     __ sub(Rret_addr, Rret_addr, Rtemp);
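         // Rret_addr = Rbcp + (is_wide ? 5 : 3) - ConstMethod* - codes_offset,
         // i.e. the bci of the bytecode following the jsr/jsr_w.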
2180 
2181     // Load the next target bytecode into R3_bytecode and advance Rbcp
2182 #ifdef AARCH64
2183     __ add(Rbcp, Rbcp, Rdisp);
2184     __ ldrb(R3_bytecode, Address(Rbcp));
2185 #else
2186     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2187 #endif // AARCH64
2188 
2189     // Push return address
2190     __ push_i(Rret_addr);
2191     // jsr returns vtos
2192     __ dispatch_only_noverify(vtos);
2193     return;
2194   }
2195 
2196   // Normal (non-jsr) branch handling
2197 
2198   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2199 #ifdef AARCH64
2200   __ add(Rbcp, Rbcp, Rdisp);
2201   __ ldrb(R3_bytecode, Address(Rbcp));
2202 #else
2203   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2204 #endif // AARCH64
2205 
2206   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2207   Label backedge_counter_overflow;
2208   Label profile_method;
2209   Label dispatch;
2210 
2211   if (UseLoopCounter) {
2212     // increment backedge counter for backward branches
2213     // Rdisp (R0): target offset
2214 
2215     const Register Rcnt = R2_tmp;
2216     const Register Rcounters = R1_tmp;
2217 
2218     // count only if backward branch
2219 #ifdef AARCH64
2220     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2221 #else
2222     __ tst(Rdisp, Rdisp);
2223     __ b(dispatch, pl);
2224 #endif // AARCH64
2225 
2226     if (TieredCompilation) {
2227       Label no_mdo;
2228       int increment = InvocationCounter::count_increment;
2229       if (ProfileInterpreter) {
2230         // Are we profiling?
2231         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2232         __ cbz(Rtemp, no_mdo);
2233         // Increment the MDO backedge counter
2234         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2235                                                   in_bytes(InvocationCounter::counter_offset()));
2236         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2237         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2238                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2239         __ b(dispatch);
2240       }
2241       __ bind(no_mdo);
2242       // Increment backedge counter in MethodCounters*
2243       // Note Rbumped_taken_count is a callee-saved register for ARM32, but caller-saved for ARM64
2244       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2245                              Rdisp, R3_bytecode,
2246                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2247       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2248       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2249                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2250     } else {
2251       // Increment backedge counter in MethodCounters*
2252       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2253                              Rdisp, R3_bytecode,
2254                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2255       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2256       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2257       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2258 
2259       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2260 #ifdef AARCH64
2261       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2262 #else
2263       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2264 #endif // AARCH64
2265       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2266 
2267       if (ProfileInterpreter) {
2268         // Test to see if we should create a method data oop
2269         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2270         __ ldr_s32(Rtemp, profile_limit);
2271         __ cmp_32(Rcnt, Rtemp);
2272         __ b(dispatch, lt);
2273 
2274         // if no method data exists, go to profile method
2275         __ test_method_data_pointer(R4_tmp, profile_method);
2276 
2277         if (UseOnStackReplacement) {
2278           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2279           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2280           __ ldr_s32(Rtemp, backward_branch_limit);
2281           __ cmp(Rbumped_taken_count, Rtemp);
2282           __ b(dispatch, lo);
2283 
2284           // When ProfileInterpreter is on, the backedge_count comes from the
2285           // MethodData*, whose value does not get reset on the call to
2286           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2287           // routine while the method is being compiled, add a second test to make
2288           // sure the overflow function is called only once every overflow_frequency.
2289           const int overflow_frequency = 1024;
2290 
2291 #ifdef AARCH64
2292           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2293 #else
2294           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2295           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2296           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2297 #endif // AARCH64
2298 
2299           __ b(backedge_counter_overflow, eq);
2300         }
2301       } else {
2302         if (UseOnStackReplacement) {
2303           // check for overflow against Rcnt, which is the sum of the counters
2304           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2305           __ ldr_s32(Rtemp, backward_branch_limit);
2306           __ cmp_32(Rcnt, Rtemp);
2307           __ b(backedge_counter_overflow, hs);
2308 
2309         }
2310       }
2311     }
2312     __ bind(dispatch);
2313   }
2314 
2315   if (!UseOnStackReplacement) {
2316     __ bind(backedge_counter_overflow);
2317   }
2318 
2319   // continue with the bytecode @ target
2320   __ dispatch_only(vtos);
2321 
2322   if (UseLoopCounter) {
2323     if (ProfileInterpreter) {
2324       // Out-of-line code to allocate method data oop.
2325       __ bind(profile_method);
2326 
2327       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2328       __ set_method_data_pointer_for_bcp();
2329       // reload next bytecode
2330       __ ldrb(R3_bytecode, Address(Rbcp));
2331       __ b(dispatch);
2332     }
2333 
2334     if (UseOnStackReplacement) {
2335       // invocation counter overflow
2336       __ bind(backedge_counter_overflow);
2337 
2338       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2339       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2340 
2341       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2342       const Register Rnmethod = R0;
2343 
2344       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2345 
2346       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2347 
2348       // nmethod may have been invalidated (VM may block upon call_VM return)
2349       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2350       __ cmp(R1_tmp, nmethod::in_use);
2351       __ b(dispatch, ne);
2352 
2353       // We have the address of an on stack replacement routine in Rnmethod,
2354       // We need to prepare to execute the OSR method. First we must
2355       // migrate the locals and monitors off of the stack.
2356 
2357       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2358 
2359       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2360 
2361       // R0 is OSR buffer
2362 
2363       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2364       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2365 
2366 #ifdef AARCH64
2367       __ ldp(FP, LR, Address(FP));
2368       __ mov(SP, Rtemp);
2369 #else
2370       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2371       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2372 #endif // AARCH64
2373 
2374       __ jump(R1_tmp);
2375     }
2376   }
2377 }
2378 
2379 
2380 void TemplateTable::if_0cmp(Condition cc) {
2381   transition(itos, vtos);
2382   // assume branch is more often taken than not (loops use backward branches)
2383   Label not_taken;
2384 #ifdef AARCH64
2385   if (cc == equal) {
2386     __ cbnz_w(R0_tos, not_taken);
2387   } else if (cc == not_equal) {
2388     __ cbz_w(R0_tos, not_taken);
2389   } else {
2390     __ cmp_32(R0_tos, 0);
2391     __ b(not_taken, convNegCond(cc));
2392   }
2393 #else
2394   __ cmp_32(R0_tos, 0);
2395   __ b(not_taken, convNegCond(cc));
2396 #endif // AARCH64
2397   branch(false, false);
2398   __ bind(not_taken);
2399   __ profile_not_taken_branch(R0_tmp);
2400 }
2401 
2402 
2403 void TemplateTable::if_icmp(Condition cc) {
2404   transition(itos, vtos);
2405   // assume branch is more often taken than not (loops use backward branches)
2406   Label not_taken;
2407   __ pop_i(R1_tmp);
2408   __ cmp_32(R1_tmp, R0_tos);
2409   __ b(not_taken, convNegCond(cc));
2410   branch(false, false);
2411   __ bind(not_taken);
2412   __ profile_not_taken_branch(R0_tmp);
2413 }
2414 
2415 
2416 void TemplateTable::if_nullcmp(Condition cc) {
2417   transition(atos, vtos);
2418   assert(cc == equal || cc == not_equal, "invalid condition");
2419 
2420   // assume branch is more often taken than not (loops use backward branches)
2421   Label not_taken;
2422   if (cc == equal) {
2423     __ cbnz(R0_tos, not_taken);
2424   } else {
2425     __ cbz(R0_tos, not_taken);
2426   }
2427   branch(false, false);
2428   __ bind(not_taken);
2429   __ profile_not_taken_branch(R0_tmp);
2430 }
2431 
2432 
2433 void TemplateTable::if_acmp(Condition cc) {
2434   transition(atos, vtos);
2435   // assume branch is more often taken than not (loops use backward branches)
2436   Label not_taken;
2437   __ pop_ptr(R1_tmp);
2438   __ cmp(R1_tmp, R0_tos);
2439   __ b(not_taken, convNegCond(cc));
2440   branch(false, false);
2441   __ bind(not_taken);
2442   __ profile_not_taken_branch(R0_tmp);
2443 }
2444 
2445 
2446 void TemplateTable::ret() {
2447   transition(vtos, vtos);
2448   const Register Rlocal_index = R1_tmp;
2449   const Register Rret_bci = Rtmp_save0; // R4/R19
2450 
2451   locals_index(Rlocal_index);
2452   Address local = load_iaddress(Rlocal_index, Rtemp);
2453   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2454   __ profile_ret(Rtmp_save1, Rret_bci);
2455   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2456   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2457   __ add(Rbcp, Rtemp, Rret_bci);
2458   __ dispatch_next(vtos);
2459 }
2460 
2461 
2462 void TemplateTable::wide_ret() {
2463   transition(vtos, vtos);
2464   const Register Rlocal_index = R1_tmp;
2465   const Register Rret_bci = Rtmp_save0; // R4/R19
2466 
2467   locals_index_wide(Rlocal_index);
2468   Address local = load_iaddress(Rlocal_index, Rtemp);
2469   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2470   __ profile_ret(Rtmp_save1, Rret_bci);
2471   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2472   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2473   __ add(Rbcp, Rtemp, Rret_bci);
2474   __ dispatch_next(vtos);
2475 }
2476 
2477 
2478 void TemplateTable::tableswitch() {
2479   transition(itos, vtos);
2480 
2481   const Register Rindex  = R0_tos;
2482 #ifndef AARCH64
2483   const Register Rtemp2  = R1_tmp;
2484 #endif // !AARCH64
2485   const Register Rabcp   = R2_tmp;  // aligned bcp
2486   const Register Rlow    = R3_tmp;
2487   const Register Rhigh   = R4_tmp;
2488   const Register Roffset = R5_tmp;
2489 
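       // Layout of tableswitch in the code stream, after the opcode byte and
       // 0-3 padding bytes up to a 4-byte boundary (all values are big-endian
       // signed 32-bit words):
       //   default_offset, low, high, jump_offsets[high - low + 1]
       // The alignment below skips the padding and the default word, so Rabcp
       // points at 'low'; after the load with writeback it points at the first
       // jump offset, and the default offset is at Rabcp - 3*BytesPerInt.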
2490   // align bcp
2491   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2492   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2493 
2494   // load lo & hi
2495 #ifdef AARCH64
2496   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2497 #else
2498   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2499 #endif // AARCH64
2500   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2501   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2502 
2503   // compare index with high bound
2504   __ cmp_32(Rhigh, Rindex);
2505 
2506 #ifdef AARCH64
2507   Label default_case, do_dispatch;
2508   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2509   __ b(default_case, lt);
2510 
2511   __ sub_w(Rindex, Rindex, Rlow);
2512   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2513   if (ProfileInterpreter) {
2514     __ sxtw(Rindex, Rindex);
2515     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2516   }
2517   __ b(do_dispatch);
2518 
2519   __ bind(default_case);
2520   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2521   if (ProfileInterpreter) {
2522     __ profile_switch_default(R0_tmp);
2523   }
2524 
2525   __ bind(do_dispatch);
2526 #else
2527 
2528   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2529   __ subs(Rindex, Rindex, Rlow, ge);
2530 
2531   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2532   // ("ge" status accumulated from cmp and subs instructions) then load
2533   // offset from table, otherwise load offset for default case
2534 
2535   if (ProfileInterpreter) {
2536     Label default_case, continue_execution;
2537 
2538     __ b(default_case, lt);
2539     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2540     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2541     __ b(continue_execution);
2542 
2543     __ bind(default_case);
2544     __ profile_switch_default(R0_tmp);
2545     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2546 
2547     __ bind(continue_execution);
2548   } else {
2549     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2550     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2551   }
2552 #endif // AARCH64
2553 
2554   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2555 
2556   // load the next bytecode to R3_bytecode and advance Rbcp
2557 #ifdef AARCH64
2558   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2559   __ ldrb(R3_bytecode, Address(Rbcp));
2560 #else
2561   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2562 #endif // AARCH64
2563   __ dispatch_only(vtos);
2564 
2565 }
2566 
2567 
2568 void TemplateTable::lookupswitch() {
2569   transition(itos, itos);
2570   __ stop("lookupswitch bytecode should have been rewritten");
2571 }
2572 
2573 
2574 void TemplateTable::fast_linearswitch() {
2575   transition(itos, vtos);
2576   Label loop, found, default_case, continue_execution;
2577 
2578   const Register Rkey     = R0_tos;
2579   const Register Rabcp    = R2_tmp;  // aligned bcp
2580   const Register Rdefault = R3_tmp;
2581   const Register Rcount   = R4_tmp;
2582   const Register Roffset  = R5_tmp;
2583 
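       // Layout of lookupswitch in the code stream, after the opcode byte and
       // 0-3 padding bytes up to a 4-byte boundary (all values are big-endian
       // signed 32-bit words):
       //   default_offset, npairs, { match, offset } x npairs
       // Rabcp is aligned to the start of this structure and then advanced
       // past default and npairs by the load with writeback below.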
2584   // bswap Rkey, so we can avoid bswapping the table entries
2585   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2586 
2587   // align bcp
2588   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2589   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2590 
2591   // load default & counter
2592 #ifdef AARCH64
2593   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2594 #else
2595   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2596 #endif // AARCH64
2597   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2598 
2599 #ifdef AARCH64
2600   __ cbz_w(Rcount, default_case);
2601 #else
2602   __ cmp_32(Rcount, 0);
2603   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2604   __ b(default_case, eq);
2605 #endif // AARCH64
2606 
2607   // table search
2608   __ bind(loop);
2609 #ifdef AARCH64
2610   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2611 #endif // AARCH64
2612   __ cmp_32(Rtemp, Rkey);
2613   __ b(found, eq);
2614   __ subs(Rcount, Rcount, 1);
2615 #ifndef AARCH64
2616   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2617 #endif // !AARCH64
2618   __ b(loop, ne);
2619 
2620   // default case
2621   __ bind(default_case);
2622   __ profile_switch_default(R0_tmp);
2623   __ mov(Roffset, Rdefault);
2624   __ b(continue_execution);
2625 
2626   // entry found -> get offset
2627   __ bind(found);
2628   // Rabcp is already incremented and points to the next entry
2629   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2630   if (ProfileInterpreter) {
2631     // Calculate index of the selected case.
2632     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2633 
2634     // align bcp
2635     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2636     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2637 
2638     // load number of cases
2639     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2640     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2641 
2642     // Selected index = <number of cases> - <current loop count>
2643     __ sub(R1_tmp, R2_tmp, Rcount);
2644     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2645   }
2646 
2647   // continue execution
2648   __ bind(continue_execution);
2649   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2650 
2651   // load the next bytecode to R3_bytecode and advance Rbcp
2652 #ifdef AARCH64
2653   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2654   __ ldrb(R3_bytecode, Address(Rbcp));
2655 #else
2656   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2657 #endif // AARCH64
2658   __ dispatch_only(vtos);
2659 }
2660 
2661 
2662 void TemplateTable::fast_binaryswitch() {
2663   transition(itos, vtos);
2664   // Implementation using the following core algorithm:
2665   //
2666   // int binary_search(int key, LookupswitchPair* array, int n) {
2667   //   // Binary search according to "Methodik des Programmierens" by
2668   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2669   //   int i = 0;
2670   //   int j = n;
2671   //   while (i+1 < j) {
2672   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2673   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2674   //     // where a stands for the array and assuming that the (nonexistent)
2675   //     // element a[n] is infinitely big.
2676   //     int h = (i + j) >> 1;
2677   //     // i < h < j
2678   //     if (key < array[h].fast_match()) {
2679   //       j = h;
2680   //     } else {
2681   //       i = h;
2682   //     }
2683   //   }
2684   //   // R: a[i] <= key < a[i+1] or Q
2685   //   // (i.e., if key is within array, i is the correct index)
2686   //   return i;
2687   // }
2688 
2689   // register allocation
2690   const Register key    = R0_tos;                // already set (tosca)
2691   const Register array  = R1_tmp;
2692   const Register i      = R2_tmp;
2693   const Register j      = R3_tmp;
2694   const Register h      = R4_tmp;
2695   const Register val    = R5_tmp;
2696   const Register temp1  = Rtemp;
2697   const Register temp2  = LR_tmp;
2698   const Register offset = R3_tmp;
2699 
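       // Each LookupswitchPair occupies two BytesPerInt words (match, offset),
       // hence the scaling by lsl (1 + LogBytesPerInt) when indexing 'array'.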
2700   // set 'array' = aligned bcp + 2 ints
2701   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2702   __ align_reg(array, temp1, BytesPerInt);
2703 
2704   // initialize i & j
2705   __ mov(i, 0);                                  // i = 0;
2706   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2707   // Convert j into native byte-ordering
2708   __ byteswap_u32(j, temp1, temp2);
2709 
2710   // and start
2711   Label entry;
2712   __ b(entry);
2713 
2714   // binary search loop
2715   { Label loop;
2716     __ bind(loop);
2717     // int h = (i + j) >> 1;
2718     __ add(h, i, j);                             // h = i + j;
2719     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2720     // if (key < array[h].fast_match()) {
2721     //   j = h;
2722     // } else {
2723     //   i = h;
2724     // }
2725 #ifdef AARCH64
2726     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2727     __ ldr_s32(val, Address(temp1));
2728 #else
2729     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2730 #endif // AARCH64
2731     // Convert array[h].match to native byte-ordering before compare
2732     __ byteswap_u32(val, temp1, temp2);
2733     __ cmp_32(key, val);
2734     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2735     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2736     // while (i+1 < j)
2737     __ bind(entry);
2738     __ add(temp1, i, 1);                             // i+1
2739     __ cmp(temp1, j);                                // i+1 < j
2740     __ b(loop, lt);
2741   }
2742 
2743   // end of binary search, result index is i (must check again!)
2744   Label default_case;
2745   // Convert array[i].match to native byte-ordering before compare
2746 #ifdef AARCH64
2747   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2748   __ ldr_s32(val, Address(temp1));
2749 #else
2750   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2751 #endif // AARCH64
2752   __ byteswap_u32(val, temp1, temp2);
2753   __ cmp_32(key, val);
2754   __ b(default_case, ne);
2755 
2756   // entry found
2757   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2758   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2759   __ profile_switch_case(R0, i, R1, i);
2760   __ byteswap_u32(offset, temp1, temp2);
2761 #ifdef AARCH64
2762   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2763   __ ldrb(R3_bytecode, Address(Rbcp));
2764 #else
2765   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2766 #endif // AARCH64
2767   __ dispatch_only(vtos);
2768 
2769   // default case
2770   __ bind(default_case);
2771   __ profile_switch_default(R0);
2772   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2773   __ byteswap_u32(offset, temp1, temp2);
2774 #ifdef AARCH64
2775   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2776   __ ldrb(R3_bytecode, Address(Rbcp));
2777 #else
2778   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2779 #endif // AARCH64
2780   __ dispatch_only(vtos);
2781 }
2782 
2783 
2784 void TemplateTable::_return(TosState state) {
2785   transition(state, state);
2786   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2787 
2788   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2789     Label skip_register_finalizer;
2790     assert(state == vtos, "only valid state");
2791     __ ldr(R1, aaddress(0));
2792     __ load_klass(Rtemp, R1);
2793     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2794     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2795 
2796     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2797 
2798     __ bind(skip_register_finalizer);
2799   }
2800 
2801   // Narrow result if state is itos but result type is smaller.
2802   // Need to narrow in the return bytecode rather than in generate_return_entry
2803   // since compiled code callers expect the result to already be narrowed.
2804   if (state == itos) {
2805     __ narrow(R0_tos);
2806   }
2807   __ remove_activation(state, LR);
2808 
2809   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2810 
2811 #ifndef AARCH64
2812   // According to interpreter calling conventions, result is returned in R0/R1,
2813   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2814   // This conversion should be done after remove_activation, as it uses
2815   // push(state) & pop(state) to preserve return value.
2816   __ convert_tos_to_retval(state);
2817 #endif // !AARCH64
2818 
2819   __ ret();
2820 
2821   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2822   __ nop();
2823 }
2824 
2825 
2826 // ----------------------------------------------------------------------------
2827 // Volatile variables demand their effects be made known to all CPUs in
2828 // order.  Store buffers on most chips allow reads & writes to reorder; the
2829 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2830 // memory barrier (i.e., it's not sufficient that the interpreter does not
2831 // reorder volatile references, the hardware also must not reorder them).
2832 //
2833 // According to the new Java Memory Model (JMM):
2834 // (1) All volatiles are serialized with respect to each other.
2835 // ALSO reads & writes act as acquire & release, so:
2836 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2837 // the read float up to before the read.  It's OK for non-volatile memory refs
2838 // that happen before the volatile read to float down below it.
2839 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2840 // that happen BEFORE the write float down to after the write.  It's OK for
2841 // non-volatile memory refs that happen after the volatile write to float up
2842 // before it.
2843 //
2844 // We only put in barriers around volatile refs (they are expensive), not
2845 // _between_ memory refs (that would require us to track the flavor of the
2846 // previous memory refs).  Requirements (2) and (3) require some barriers
2847 // before volatile stores and after volatile loads.  These nearly cover
2848 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2849 // case is placed after volatile-stores although it could just as well go
2850 // before volatile-loads.
2851 // TODO-AARCH64: consider removing extra unused parameters
2852 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2853                                      Register tmp,
2854                                      bool preserve_flags,
2855                                      Register load_tgt) {
2856 #ifdef AARCH64
2857   __ membar(order_constraint);
2858 #else
2859   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2860 #endif
2861 }
2862 
2863 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2864 void TemplateTable::resolve_cache_and_index(int byte_no,
2865                                             Register Rcache,
2866                                             Register Rindex,
2867                                             size_t index_size) {
2868   assert_different_registers(Rcache, Rindex, Rtemp);
2869 
2870   Label resolved;
2871   Bytecodes::Code code = bytecode();
2872   switch (code) {
2873   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2874   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2875   }
2876 
2877   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2878   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2879   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2880   __ b(resolved, eq);
2881 
2882   // resolve first time through
2883   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2884   __ mov(R1, code);
2885   __ call_VM(noreg, entry, R1);
2886   // Update registers with resolved info
2887   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2888   __ bind(resolved);
2889 }
2890 
2891 
2892 // The Rcache and Rindex registers must be set before the call
2893 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2894                                               Register Rindex,
2895                                               Register Roffset,
2896                                               Register Rflags,
2897                                               Register Robj,
2898                                               bool is_static = false) {
2899 
2900   assert_different_registers(Rcache, Rindex, Rtemp);
2901   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2902 
2903   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2904 
2905   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2906 
2907   // Field offset
2908   __ ldr(Roffset, Address(Rtemp,
2909            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2910 
2911   // Flags
2912   __ ldr_u32(Rflags, Address(Rtemp,
2913            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2914 
2915   if (is_static) {
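         // f1 holds the field holder Klass*; static fields are stored in the
         // klass' java mirror, so load the mirror and use it as the object base.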
2916     __ ldr(Robj, Address(Rtemp,
2917              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2918     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2919     __ ldr(Robj, Address(Robj, mirror_offset));
2920     __ resolve_oop_handle(Robj);
2921   }
2922 }
2923 
2924 
2925 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2926 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2927                                                Register method,
2928                                                Register itable_index,
2929                                                Register flags,
2930                                                bool is_invokevirtual,
2931                                                bool is_invokevfinal/*unused*/,
2932                                                bool is_invokedynamic) {
2933   // setup registers
2934   const Register cache = R2_tmp;
2935   const Register index = R3_tmp;
2936   const Register temp_reg = Rtemp;
2937   assert_different_registers(cache, index, temp_reg);
2938   assert_different_registers(method, itable_index, temp_reg);
2939 
2940   // determine constant pool cache field offsets
2941   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2942   const int method_offset = in_bytes(
2943     ConstantPoolCache::base_offset() +
2944       ((byte_no == f2_byte)
2945        ? ConstantPoolCacheEntry::f2_offset()
2946        : ConstantPoolCacheEntry::f1_offset()
2947       )
2948     );
2949   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2950                                     ConstantPoolCacheEntry::flags_offset());
2951   // access constant pool cache fields
2952   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2953                                     ConstantPoolCacheEntry::f2_offset());
2954 
2955   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2956   resolve_cache_and_index(byte_no, cache, index, index_size);
2957     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2958     __ ldr(method, Address(temp_reg, method_offset));
2959 
2960   if (itable_index != noreg) {
2961     __ ldr(itable_index, Address(temp_reg, index_offset));
2962   }
2963   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2964 }
2965 
2966 
2967 // The registers cache and index are expected to be set before the call, and should not be Rtemp.
2968 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
2969 // except cache and index registers which are preserved.
2970 void TemplateTable::jvmti_post_field_access(Register Rcache,
2971                                             Register Rindex,
2972                                             bool is_static,
2973                                             bool has_tos) {
2974   assert_different_registers(Rcache, Rindex, Rtemp);
2975 
2976   if (__ can_post_field_access()) {
2977     // Check to see if a field access watch has been set before we take
2978     // the time to call into the VM.
2979 
2980     Label Lcontinue;
2981 
2982     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2983     __ cbz(Rtemp, Lcontinue);
2984 
2985     // cache entry pointer
2986     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2987     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2988     if (is_static) {
2989       __ mov(R1, 0);        // NULL object reference
2990     } else {
2991       __ pop(atos);         // Get the object
2992       __ mov(R1, R0_tos);
2993       __ verify_oop(R1);
2994       __ push(atos);        // Restore stack state
2995     }
2996     // R1: object pointer or NULL
2997     // R2: cache entry pointer
2998     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2999                R1, R2);
3000     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3001 
3002     __ bind(Lcontinue);
3003   }
3004 }
3005 
3006 
3007 void TemplateTable::pop_and_check_object(Register r) {
3008   __ pop_ptr(r);
3009   __ null_check(r, Rtemp);  // for field access must check obj.
3010   __ verify_oop(r);
3011 }
3012 
3013 
3014 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3015   transition(vtos, vtos);
3016 
3017   const Register Roffset  = R2_tmp;
3018   const Register Robj     = R3_tmp;
3019   const Register Rcache   = R4_tmp;
3020   const Register Rflagsav = Rtmp_save0;  // R4/R19
3021   const Register Rindex   = R5_tmp;
3022   const Register Rflags   = R5_tmp;
3023 
3024   const bool gen_volatile_check = os::is_MP();
3025 
3026   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3027   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3028   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3029 
3030   if (gen_volatile_check) {
3031     __ mov(Rflagsav, Rflags);
3032   }
3033 
3034   if (!is_static) pop_and_check_object(Robj);
3035 
3036   Label Done, Lint, Ltable, shouldNotReachHere;
3037   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3038 
3039   // compute type
3040   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3041   // Make sure we don't need to mask flags after the above shift
3042   ConstantPoolCacheEntry::verify_tos_state_shift();
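       // Rflags now holds the field's TosState (btos, ztos, ..., atos) and is
       // used below as the index for the table switch.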
3043 
3044   // There are actually two versions of implementation of getfield/getstatic:
3045   //
3046   // 32-bit ARM:
3047   // 1) Table switch using add(PC,...) instruction (fast_version)
3048   // 2) Table switch using ldr(PC,...) instruction
3049   //
3050   // AArch64:
3051   // 1) Table switch using adr/add/br instructions (fast_version)
3052   // 2) Table switch using adr/ldr/br instructions
3053   //
3054   // The first version requires a fixed-size code block for each case and
3055   // cannot be used when RewriteBytecodes, VerifyOops or
3056   // VerifyInterpreterStackTop is enabled.
3057 
3058   // Size of fixed size code block for fast_version
3059   const int log_max_block_size = 2;
3060   const int max_block_size = 1 << log_max_block_size;
3061 
3062   // Decide if fast version is enabled
3063   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3064 
3065   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3066   // atos requires additional processing in slow version.
3067   // On AArch64 atos and itos cannot be merged.
3068   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3069 
3070   assert(number_of_states == 10, "number of tos states should be equal to 10");
3071 
3072   __ cmp(Rflags, itos);
3073 #ifdef AARCH64
3074   __ b(Lint, eq);
3075 
3076   if (fast_version) {
3077     __ adr(Rtemp, Lbtos);
3078     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3079     __ br(Rtemp);
3080   } else {
3081     __ adr(Rtemp, Ltable);
3082     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3083     __ br(Rtemp);
3084   }
3085 #else
3086   if (atos_merged_with_itos) {
3087     __ cmp(Rflags, atos, ne);
3088   }
3089 
3090   // table switch by type
3091   if (fast_version) {
3092     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3093   } else {
3094     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3095   }
3096 
3097   // jump to itos/atos case
3098   __ b(Lint);
3099 #endif // AARCH64
3100 
3101   // table with addresses for slow version
3102   if (fast_version) {
3103     // nothing to do
3104   } else {
3105     AARCH64_ONLY(__ align(wordSize));
3106     __ bind(Ltable);
3107     __ emit_address(Lbtos);
3108     __ emit_address(Lztos);
3109     __ emit_address(Lctos);
3110     __ emit_address(Lstos);
3111     __ emit_address(Litos);
3112     __ emit_address(Lltos);
3113     __ emit_address(Lftos);
3114     __ emit_address(Ldtos);
3115     __ emit_address(Latos);
3116   }
3117 
3118 #ifdef ASSERT
3119   int seq = 0;
3120 #endif
3121   // btos
3122   {
3123     assert(btos == seq++, "btos has unexpected value");
3124     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3125     __ bind(Lbtos);
3126     __ ldrsb(R0_tos, Address(Robj, Roffset));
3127     __ push(btos);
3128     // Rewrite bytecode to be faster
3129     if (!is_static && rc == may_rewrite) {
3130       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3131     }
3132     __ b(Done);
3133   }
3134 
3135   // ztos (same as btos for getfield)
3136   {
3137     assert(ztos == seq++, "ztos has unexpected value");
3138     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3139     __ bind(Lztos);
3140     __ ldrsb(R0_tos, Address(Robj, Roffset));
3141     __ push(ztos);
3142     // Rewrite bytecode to be faster (use btos fast getfield)
3143     if (!is_static && rc == may_rewrite) {
3144       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3145     }
3146     __ b(Done);
3147   }
3148 
3149   // ctos
3150   {
3151     assert(ctos == seq++, "ctos has unexpected value");
3152     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3153     __ bind(Lctos);
3154     __ ldrh(R0_tos, Address(Robj, Roffset));
3155     __ push(ctos);
3156     if (!is_static && rc == may_rewrite) {
3157       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3158     }
3159     __ b(Done);
3160   }
3161 
3162   // stos
3163   {
3164     assert(stos == seq++, "stos has unexpected value");
3165     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3166     __ bind(Lstos);
3167     __ ldrsh(R0_tos, Address(Robj, Roffset));
3168     __ push(stos);
3169     if (!is_static && rc == may_rewrite) {
3170       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3171     }
3172     __ b(Done);
3173   }
3174 
3175   // itos
3176   {
3177     assert(itos == seq++, "itos has unexpected value");
3178     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3179     __ bind(Litos);
3180     __ b(shouldNotReachHere);
3181   }
3182 
3183   // ltos
3184   {
3185     assert(ltos == seq++, "ltos has unexpected value");
3186     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3187     __ bind(Lltos);
3188 #ifdef AARCH64
3189     __ ldr(R0_tos, Address(Robj, Roffset));
3190 #else
3191     __ add(Roffset, Robj, Roffset);
3192     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3193 #endif // AARCH64
3194     __ push(ltos);
3195     if (!is_static && rc == may_rewrite) {
3196       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3197     }
3198     __ b(Done);
3199   }
3200 
3201   // ftos
3202   {
3203     assert(ftos == seq++, "ftos has unexpected value");
3204     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3205     __ bind(Lftos);
3206     // floats and ints are placed on the stack in the same way, so
3207     // we can use push(itos) to transfer the value without using VFP
3208     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3209     __ push(itos);
3210     if (!is_static && rc == may_rewrite) {
3211       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3212     }
3213     __ b(Done);
3214   }
3215 
3216   // dtos
3217   {
3218     assert(dtos == seq++, "dtos has unexpected value");
3219     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3220     __ bind(Ldtos);
3221     // doubles and longs are placed on the stack in the same way, so
3222     // we can use push(ltos) to transfer the value without using VFP
3223 #ifdef AARCH64
3224     __ ldr(R0_tos, Address(Robj, Roffset));
3225 #else
3226     __ add(Rtemp, Robj, Roffset);
3227     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3228 #endif // AARCH64
3229     __ push(ltos);
3230     if (!is_static && rc == may_rewrite) {
3231       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3232     }
3233     __ b(Done);
3234   }
3235 
3236   // atos
3237   {
3238     assert(atos == seq++, "atos has unexpected value");
3239 
3240     // atos case for AArch64 and slow version on 32-bit ARM
3241     if (!atos_merged_with_itos) {
3242       __ bind(Latos);
3243       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3244       __ push(atos);
3245       // Rewrite bytecode to be faster
3246       if (!is_static && rc == may_rewrite) {
3247         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3248       }
3249       __ b(Done);
3250     }
3251   }
3252 
3253   assert(vtos == seq++, "vtos has unexpected value");
3254 
3255   __ bind(shouldNotReachHere);
3256   __ should_not_reach_here();
3257 
3258   // itos and atos cases are frequent so it makes sense to move them out of table switch
3259   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3260 
3261   __ bind(Lint);
3262   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3263   __ push(itos);
3264   // Rewrite bytecode to be faster
3265   if (!is_static && rc == may_rewrite) {
3266     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3267   }
3268 
3269   __ bind(Done);
3270 
3271   if (gen_volatile_check) {
3272     // Check for volatile field
3273     Label notVolatile;
3274     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3275 
3276     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3277 
3278     __ bind(notVolatile);
3279   }
3280 
3281 }
3282 
3283 void TemplateTable::getfield(int byte_no) {
3284   getfield_or_static(byte_no, false);
3285 }
3286 
3287 void TemplateTable::nofast_getfield(int byte_no) {
3288   getfield_or_static(byte_no, false, may_not_rewrite);
3289 }
3290 
3291 void TemplateTable::getstatic(int byte_no) {
3292   getfield_or_static(byte_no, true);
3293 }
3294 
3295 
// The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
// except the cache and index registers, which are preserved.
3299 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3300   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3301   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3302 
3303   if (__ can_post_field_modification()) {
3304     // Check to see if a field modification watch has been set before we take
3305     // the time to call into the VM.
3306     Label Lcontinue;
3307 
3308     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3309     __ cbz(Rtemp, Lcontinue);
3310 
3311     if (is_static) {
3312       // Life is simple.  Null out the object pointer.
3313       __ mov(R1, 0);
3314     } else {
3315       // Life is harder. The stack holds the value on top, followed by the object.
3316       // We don't know the size of the value, though; it could be one or two words
3317       // depending on its type. As a result, we must find the type to determine where
3318       // the object is.
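      //
      // Illustrative expression stack layout at this point (a sketch; the
      // stack grows towards lower addresses, offsets via expr_offset_in_bytes):
      //
      //   [ value, word 0 ] <-- Rstack_top        expr_offset_in_bytes(0)
      //   [ value, word 1 ]   (ltos/dtos only)    expr_offset_in_bytes(1)
      //   [ object ref    ]                       expr_offset_in_bytes(1) or (2)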
3319 
3320       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3321       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3322 
3323       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3324       // Make sure we don't need to mask Rtemp after the above shift
3325       ConstantPoolCacheEntry::verify_tos_state_shift();
3326 
3327       __ cmp(Rtemp, ltos);
3328       __ cond_cmp(Rtemp, dtos, ne);
3329 #ifdef AARCH64
3330       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3331       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3332       __ mov(R1, Rtemp, eq);
3333       __ ldr(R1, Address(Rstack_top, R1));
3334 #else
3335       // two word value (ltos/dtos)
3336       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3337 
3338       // one word value (not ltos, dtos)
3339       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3340 #endif // AARCH64
3341     }
3342 
3343     // cache entry pointer
3344     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3345     __ add(R2, R2, in_bytes(cp_base_offset));
3346 
3347     // object (tos)
3348     __ mov(R3, Rstack_top);
3349 
3350     // R1: object pointer set up above (NULL if static)
3351     // R2: cache entry pointer
3352     // R3: value object on the stack
3353     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3354                R1, R2, R3);
3355     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3356 
3357     __ bind(Lcontinue);
3358   }
3359 }
3360 
3361 
3362 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3363   transition(vtos, vtos);
3364 
3365   const Register Roffset  = R2_tmp;
3366   const Register Robj     = R3_tmp;
3367   const Register Rcache   = R4_tmp;
3368   const Register Rflagsav = Rtmp_save0;  // R4/R19
3369   const Register Rindex   = R5_tmp;
3370   const Register Rflags   = R5_tmp;
3371 
3372   const bool gen_volatile_check = os::is_MP();
3373 
3374   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3375   jvmti_post_field_mod(Rcache, Rindex, is_static);
3376   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
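  // Barrier scheme for the store (a sketch of the usual JMM recipe):
  //   - before a volatile store:   StoreStore | LoadStore barrier;
  //   - after a volatile store:    StoreLoad barrier;
  //   - after a final field store: StoreStore barrier, so initialized fields
  //     are visible before the object reference can escape.
  // The leading barrier is emitted just below for volatile fields; the
  // trailing ones after Done.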
3377 
3378   if (gen_volatile_check) {
3379     // Check for volatile field
3380     Label notVolatile;
3381     __ mov(Rflagsav, Rflags);
3382     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3383 
3384     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3385 
3386     __ bind(notVolatile);
3387   }
3388 
3389   Label Done, Lint, shouldNotReachHere;
3390   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3391 
3392   // compute type
3393   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3394   // Make sure we don't need to mask flags after the above shift
3395   ConstantPoolCacheEntry::verify_tos_state_shift();
3396 
  // There are two implementations of putfield/putstatic:
  //
  // 32-bit ARM:
  // 1) Table switch using add(PC,...) instruction (fast_version)
  // 2) Table switch using ldr(PC,...) instruction
  //
  // AArch64:
  // 1) Table switch using adr/add/br instructions (fast_version)
  // 2) Table switch using adr/ldr/br instructions
  //
  // The first version requires a fixed-size code block for each case and
  // cannot be used when RewriteBytecodes or VerifyOops is enabled.
3410 
  // Size of the fixed code block for fast_version (in instructions)
3412   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3413   const int max_block_size = 1 << log_max_block_size;
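  // As a rough sketch, the fast_version dispatch below computes the branch
  // target as
  //   target = Lbtos + tos_state * max_block_size * instruction_size
  // since FixedSizeCodeBlock pads every case to exactly max_block_size
  // instructions; hence the shift by log_max_block_size + LogInstructionSize.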
3414 
3415   // Decide if fast version is enabled
3416   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3417 
  assert(number_of_states == 10, "number of tos states should be equal to 10");
3419 
  // itos case is frequent and is moved outside the table switch
3421   __ cmp(Rflags, itos);
3422 
3423 #ifdef AARCH64
3424   __ b(Lint, eq);
3425 
3426   if (fast_version) {
3427     __ adr(Rtemp, Lbtos);
3428     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3429     __ br(Rtemp);
3430   } else {
3431     __ adr(Rtemp, Ltable);
3432     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3433     __ br(Rtemp);
3434   }
3435 #else
3436   // table switch by type
3437   if (fast_version) {
3438     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3439   } else  {
3440     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3441   }
3442 
3443   // jump to itos case
3444   __ b(Lint);
3445 #endif // AARCH64
3446 
3447   // table with addresses for slow version
3448   if (fast_version) {
3449     // nothing to do
3450   } else  {
3451     AARCH64_ONLY(__ align(wordSize));
3452     __ bind(Ltable);
3453     __ emit_address(Lbtos);
3454     __ emit_address(Lztos);
3455     __ emit_address(Lctos);
3456     __ emit_address(Lstos);
3457     __ emit_address(Litos);
3458     __ emit_address(Lltos);
3459     __ emit_address(Lftos);
3460     __ emit_address(Ldtos);
3461     __ emit_address(Latos);
3462   }
3463 
3464 #ifdef ASSERT
3465   int seq = 0;
3466 #endif
3467   // btos
3468   {
3469     assert(btos == seq++, "btos has unexpected value");
3470     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3471     __ bind(Lbtos);
3472     __ pop(btos);
3473     if (!is_static) pop_and_check_object(Robj);
3474     __ strb(R0_tos, Address(Robj, Roffset));
3475     if (!is_static && rc == may_rewrite) {
3476       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3477     }
3478     __ b(Done);
3479   }
3480 
3481   // ztos
3482   {
3483     assert(ztos == seq++, "ztos has unexpected value");
3484     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3485     __ bind(Lztos);
3486     __ pop(ztos);
3487     if (!is_static) pop_and_check_object(Robj);
3488     __ and_32(R0_tos, R0_tos, 1);
3489     __ strb(R0_tos, Address(Robj, Roffset));
3490     if (!is_static && rc == may_rewrite) {
3491       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3492     }
3493     __ b(Done);
3494   }
3495 
3496   // ctos
3497   {
3498     assert(ctos == seq++, "ctos has unexpected value");
3499     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3500     __ bind(Lctos);
3501     __ pop(ctos);
3502     if (!is_static) pop_and_check_object(Robj);
3503     __ strh(R0_tos, Address(Robj, Roffset));
3504     if (!is_static && rc == may_rewrite) {
3505       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3506     }
3507     __ b(Done);
3508   }
3509 
3510   // stos
3511   {
3512     assert(stos == seq++, "stos has unexpected value");
3513     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3514     __ bind(Lstos);
3515     __ pop(stos);
3516     if (!is_static) pop_and_check_object(Robj);
3517     __ strh(R0_tos, Address(Robj, Roffset));
3518     if (!is_static && rc == may_rewrite) {
3519       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3520     }
3521     __ b(Done);
3522   }
3523 
3524   // itos
3525   {
3526     assert(itos == seq++, "itos has unexpected value");
3527     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3528     __ bind(Litos);
3529     __ b(shouldNotReachHere);
3530   }
3531 
3532   // ltos
3533   {
3534     assert(ltos == seq++, "ltos has unexpected value");
3535     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3536     __ bind(Lltos);
3537     __ pop(ltos);
3538     if (!is_static) pop_and_check_object(Robj);
3539 #ifdef AARCH64
3540     __ str(R0_tos, Address(Robj, Roffset));
3541 #else
3542     __ add(Roffset, Robj, Roffset);
3543     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3544 #endif // AARCH64
3545     if (!is_static && rc == may_rewrite) {
3546       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3547     }
3548     __ b(Done);
3549   }
3550 
3551   // ftos
3552   {
3553     assert(ftos == seq++, "ftos has unexpected value");
3554     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3555     __ bind(Lftos);
3556     // floats and ints are placed on stack in the same way, so
3557     // we can use pop(itos) to transfer value without using VFP
3558     __ pop(itos);
3559     if (!is_static) pop_and_check_object(Robj);
3560     __ str_32(R0_tos, Address(Robj, Roffset));
3561     if (!is_static && rc == may_rewrite) {
3562       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3563     }
3564     __ b(Done);
3565   }
3566 
3567   // dtos
3568   {
3569     assert(dtos == seq++, "dtos has unexpected value");
3570     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3571     __ bind(Ldtos);
3572     // doubles and longs are placed on stack in the same way, so
3573     // we can use pop(ltos) to transfer value without using VFP
3574     __ pop(ltos);
3575     if (!is_static) pop_and_check_object(Robj);
3576 #ifdef AARCH64
3577     __ str(R0_tos, Address(Robj, Roffset));
3578 #else
3579     __ add(Rtemp, Robj, Roffset);
3580     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3581 #endif // AARCH64
3582     if (!is_static && rc == may_rewrite) {
3583       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3584     }
3585     __ b(Done);
3586   }
3587 
3588   // atos
3589   {
    assert(atos == seq++, "atos has unexpected value");
3591     __ bind(Latos);
3592     __ pop(atos);
3593     if (!is_static) pop_and_check_object(Robj);
3594     // Store into the field
3595     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3596     if (!is_static && rc == may_rewrite) {
3597       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3598     }
3599     __ b(Done);
3600   }
3601 
3602   __ bind(shouldNotReachHere);
3603   __ should_not_reach_here();
3604 
  // itos case is frequent and is moved outside the table switch
3606   __ bind(Lint);
3607   __ pop(itos);
3608   if (!is_static) pop_and_check_object(Robj);
3609   __ str_32(R0_tos, Address(Robj, Roffset));
3610   if (!is_static && rc == may_rewrite) {
3611     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3612   }
3613 
3614   __ bind(Done);
3615 
3616   if (gen_volatile_check) {
3617     Label notVolatile;
3618     if (is_static) {
3619       // Just check for volatile. Memory barrier for static final field
3620       // is handled by class initialization.
3621       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3622       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3623       __ bind(notVolatile);
3624     } else {
3625       // Check for volatile field and final field
3626       Label skipMembar;
3627 
3628       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3629                        1 << ConstantPoolCacheEntry::is_final_shift);
3630       __ b(skipMembar, eq);
3631 
3632       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3633 
3634       // StoreLoad barrier after volatile field write
3635       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3636       __ b(skipMembar);
3637 
3638       // StoreStore barrier after final field write
3639       __ bind(notVolatile);
3640       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3641 
3642       __ bind(skipMembar);
3643     }
3644   }
3645 
3646 }
3647 
3648 void TemplateTable::putfield(int byte_no) {
3649   putfield_or_static(byte_no, false);
3650 }
3651 
3652 void TemplateTable::nofast_putfield(int byte_no) {
3653   putfield_or_static(byte_no, false, may_not_rewrite);
3654 }
3655 
3656 void TemplateTable::putstatic(int byte_no) {
3657   putfield_or_static(byte_no, true);
3658 }
3659 
3660 
3661 void TemplateTable::jvmti_post_fast_field_mod() {
3662   // This version of jvmti_post_fast_field_mod() is not used on ARM
3663   Unimplemented();
3664 }
3665 
3666 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3667 // but preserves tosca with the given state.
3668 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3669   if (__ can_post_field_modification()) {
3670     // Check to see if a field modification watch has been set before we take
3671     // the time to call into the VM.
3672     Label done;
3673 
3674     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3675     __ cbz(R2, done);
3676 
3677     __ pop_ptr(R3);               // copy the object pointer from tos
3678     __ verify_oop(R3);
3679     __ push_ptr(R3);              // put the object pointer back on tos
3680 
3681     __ push(state);               // save value on the stack
3682 
3683     // access constant pool cache entry
3684     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3685 
3686     __ mov(R1, R3);
3687     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3688     __ mov(R3, Rstack_top); // put tos addr into R3
3689 
3690     // R1: object pointer copied above
3691     // R2: cache entry pointer
3692     // R3: jvalue object on the stack
3693     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3694 
3695     __ pop(state);                // restore value
3696 
3697     __ bind(done);
3698   }
3699 }
3700 
3701 
3702 void TemplateTable::fast_storefield(TosState state) {
3703   transition(state, vtos);
3704 
3705   ByteSize base = ConstantPoolCache::base_offset();
3706 
3707   jvmti_post_fast_field_mod(state);
3708 
3709   const Register Rcache  = R2_tmp;
3710   const Register Rindex  = R3_tmp;
3711   const Register Roffset = R3_tmp;
3712   const Register Rflags  = Rtmp_save0; // R4/R19
3713   const Register Robj    = R5_tmp;
3714 
3715   const bool gen_volatile_check = os::is_MP();
3716 
3717   // access constant pool cache
3718   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3719 
3720   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3721 
3722   if (gen_volatile_check) {
3723     // load flags to test volatile
3724     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3725   }
3726 
3727   // replace index with field offset from cache entry
3728   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3729 
3730   if (gen_volatile_check) {
3731     // Check for volatile store
3732     Label notVolatile;
3733     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3734 
    // TODO-AARCH64: on AArch64, store-release instructions can be used to get rid of this explicit barrier
3736     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3737 
3738     __ bind(notVolatile);
3739   }
3740 
3741   // Get object from stack
3742   pop_and_check_object(Robj);
3743 
3744   // access field
3745   switch (bytecode()) {
3746     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3747                                      // fall through
3748     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3749     case Bytecodes::_fast_sputfield: // fall through
3750     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3751     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3752 #ifdef AARCH64
3753     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3754     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3755     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3756 #else
3757     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3758                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3759 
3760 #ifdef __SOFTFP__
3761     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3762     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3763                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3764 #else
3765     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3766                                      __ fsts(S0_tos, Address(Robj));          break;
3767     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3768                                      __ fstd(D0_tos, Address(Robj));          break;
3769 #endif // __SOFTFP__
3770 #endif // AARCH64
3771 
3772     case Bytecodes::_fast_aputfield:
3773       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3774       break;
3775 
3776     default:
3777       ShouldNotReachHere();
3778   }
3779 
3780   if (gen_volatile_check) {
3781     Label notVolatile;
3782     Label skipMembar;
3783     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3784                    1 << ConstantPoolCacheEntry::is_final_shift);
3785     __ b(skipMembar, eq);
3786 
3787     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3788 
3789     // StoreLoad barrier after volatile field write
3790     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3791     __ b(skipMembar);
3792 
3793     // StoreStore barrier after final field write
3794     __ bind(notVolatile);
3795     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3796 
3797     __ bind(skipMembar);
3798   }
3799 }
3800 
3801 
3802 void TemplateTable::fast_accessfield(TosState state) {
3803   transition(atos, state);
3804 
3805   // do the JVMTI work here to avoid disturbing the register state below
3806   if (__ can_post_field_access()) {
3807     // Check to see if a field access watch has been set before we take
3808     // the time to call into the VM.
3809     Label done;
3810     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3811     __ cbz(R2, done);
3812     // access constant pool cache entry
3813     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3814     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3815     __ verify_oop(R0_tos);
3816     __ mov(R1, R0_tos);
3817     // R1: object pointer copied above
3818     // R2: cache entry pointer
3819     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3820     __ pop_ptr(R0_tos);   // restore object pointer
3821 
3822     __ bind(done);
3823   }
3824 
3825   const Register Robj    = R0_tos;
3826   const Register Rcache  = R2_tmp;
3827   const Register Rflags  = R2_tmp;
3828   const Register Rindex  = R3_tmp;
3829   const Register Roffset = R3_tmp;
3830 
3831   const bool gen_volatile_check = os::is_MP();
3832 
3833   // access constant pool cache
3834   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3835   // replace index with field offset from cache entry
3836   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3837   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3838 
3839   if (gen_volatile_check) {
3840     // load flags to test volatile
3841     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3842   }
3843 
3844   __ verify_oop(Robj);
3845   __ null_check(Robj, Rtemp);
3846 
3847   // access field
3848   switch (bytecode()) {
3849     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3850     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3851     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3852     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3853 #ifdef AARCH64
3854     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3855     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3856     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3857 #else
3858     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3859                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3860 #ifdef __SOFTFP__
3861     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3862     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3863                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3864 #else
3865     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3866     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3867 #endif // __SOFTFP__
3868 #endif // AARCH64
3869     case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3870     default:
3871       ShouldNotReachHere();
3872   }
3873 
3874   if (gen_volatile_check) {
3875     // Check for volatile load
3876     Label notVolatile;
3877     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3878 
    // TODO-AARCH64: on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3880     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3881 
3882     __ bind(notVolatile);
3883   }
3884 }
3885 
3886 
3887 void TemplateTable::fast_xaccess(TosState state) {
3888   transition(vtos, state);
3889 
3890   const Register Robj = R1_tmp;
3891   const Register Rcache = R2_tmp;
3892   const Register Rindex = R3_tmp;
3893   const Register Roffset = R3_tmp;
3894   const Register Rflags = R4_tmp;
3895   Label done;
3896 
3897   // get receiver
3898   __ ldr(Robj, aaddress(0));
3899 
3900   // access constant pool cache
3901   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3902   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3903   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3904 
3905   const bool gen_volatile_check = os::is_MP();
3906 
3907   if (gen_volatile_check) {
3908     // load flags to test volatile
3909     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3910   }
3911 
3912   // make sure exception is reported in correct bcp range (getfield is next instruction)
3913   __ add(Rbcp, Rbcp, 1);
3914   __ null_check(Robj, Rtemp);
3915   __ sub(Rbcp, Rbcp, 1);
3916 
3917 #ifdef AARCH64
3918   if (gen_volatile_check) {
3919     Label notVolatile;
3920     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3921 
3922     __ add(Rtemp, Robj, Roffset);
3923 
3924     if (state == itos) {
3925       __ ldar_w(R0_tos, Rtemp);
3926     } else if (state == atos) {
3927       if (UseCompressedOops) {
3928         __ ldar_w(R0_tos, Rtemp);
3929         __ decode_heap_oop(R0_tos);
3930       } else {
3931         __ ldar(R0_tos, Rtemp);
3932       }
3933       __ verify_oop(R0_tos);
3934     } else if (state == ftos) {
3935       __ ldar_w(R0_tos, Rtemp);
3936       __ fmov_sw(S0_tos, R0_tos);
3937     } else {
3938       ShouldNotReachHere();
3939     }
3940     __ b(done);
3941 
3942     __ bind(notVolatile);
3943   }
3944 #endif // AARCH64
3945 
3946   if (state == itos) {
3947     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3948   } else if (state == atos) {
3949     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3950     __ verify_oop(R0_tos);
3951   } else if (state == ftos) {
3952 #ifdef AARCH64
3953     __ ldr_s(S0_tos, Address(Robj, Roffset));
3954 #else
3955 #ifdef __SOFTFP__
3956     __ ldr(R0_tos, Address(Robj, Roffset));
3957 #else
3958     __ add(Roffset, Robj, Roffset);
3959     __ flds(S0_tos, Address(Roffset));
3960 #endif // __SOFTFP__
3961 #endif // AARCH64
3962   } else {
3963     ShouldNotReachHere();
3964   }
3965 
3966 #ifndef AARCH64
3967   if (gen_volatile_check) {
3968     // Check for volatile load
3969     Label notVolatile;
3970     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3971 
3972     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3973 
3974     __ bind(notVolatile);
3975   }
3976 #endif // !AARCH64
3977 
3978   __ bind(done);
3979 }
3980 
3981 
3982 
3983 //----------------------------------------------------------------------------------------------------
3984 // Calls
3985 
3986 void TemplateTable::count_calls(Register method, Register temp) {
3987   // implemented elsewhere
3988   ShouldNotReachHere();
3989 }
3990 
3991 
3992 void TemplateTable::prepare_invoke(int byte_no,
3993                                    Register method,  // linked method (or i-klass)
3994                                    Register index,   // itable index, MethodType, etc.
3995                                    Register recv,    // if caller wants to see it
3996                                    Register flags    // if caller wants to test it
3997                                    ) {
3998   // determine flags
3999   const Bytecodes::Code code = bytecode();
4000   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4001   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4002   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4003   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4004   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4005   const bool load_receiver       = (recv != noreg);
4006   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4007   assert(recv  == noreg || recv  == R2, "");
4008   assert(flags == noreg || flags == R3, "");
4009 
4010   // setup registers & access constant pool cache
4011   if (recv  == noreg)  recv  = R2;
4012   if (flags == noreg)  flags = R3;
4013   const Register temp = Rtemp;
4014   const Register ret_type = R1_tmp;
4015   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4016 
4017   // save 'interpreter return address'
4018   __ save_bcp();
4019 
4020   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4021 
4022   // maybe push extra argument
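  // For invokedynamic/invokehandle the resolved CP cache entry may carry an
  // appendix (a CallSite or MethodType) in the resolved-references array;
  // if the has_appendix flag is set, it is pushed as an extra trailing argument.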
4023   if (is_invokedynamic || is_invokehandle) {
4024     Label L_no_push;
4025     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4026     __ mov(temp, index);
4027     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4028     __ load_resolved_reference_at_index(index, temp);
4029     __ verify_oop(index);
4030     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4031     __ bind(L_no_push);
4032   }
4033 
4034   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4035   if (load_receiver) {
4036     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4037     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4038     __ ldr(recv, recv_addr);
4039     __ verify_oop(recv);
4040   }
4041 
4042   // compute return type
4043   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4044   // Make sure we don't need to mask flags after the above shift
4045   ConstantPoolCacheEntry::verify_tos_state_shift();
4046   // load return address
4047   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4048     __ mov_slow(temp, table);
4049     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4050   }
4051 }
4052 
4053 
4054 void TemplateTable::invokevirtual_helper(Register index,
4055                                          Register recv,
4056                                          Register flags) {
4057 
4058   const Register recv_klass = R2_tmp;
4059 
4060   assert_different_registers(index, recv, flags, Rtemp);
4061   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4062 
4063   // Test for an invoke of a final method
4064   Label notFinal;
4065   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4066 
4067   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4068 
4069   // do the call - the index is actually the method to call
4070 
4071   // It's final, need a null check here!
4072   __ null_check(recv, Rtemp);
4073 
4074   // profile this call
4075   __ profile_final_call(R0_tmp);
4076 
4077   __ jump_from_interpreted(Rmethod);
4078 
4079   __ bind(notFinal);
4080 
4081   // get receiver klass
4082   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4083   __ load_klass(recv_klass, recv);
4084 
4085   // profile this call
4086   __ profile_virtual_call(R0_tmp, recv_klass);
4087 
4088   // get target Method* & entry point
4089   const int base = in_bytes(Klass::vtable_start_offset());
4090   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
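  // In effect (a sketch): Rmethod = *(recv_klass + vtable_start_offset
  //                                   + index * wordSize
  //                                   + vtableEntry::method_offset_in_bytes()),
  // i.e. an ordinary vtable lookup; scaling by wordSize relies on
  // vtableEntry::size() == 1 (checked by the assert above).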
4091   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4092   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4093   __ jump_from_interpreted(Rmethod);
4094 }
4095 
4096 void TemplateTable::invokevirtual(int byte_no) {
4097   transition(vtos, vtos);
4098   assert(byte_no == f2_byte, "use this argument");
4099 
4100   const Register Rrecv  = R2_tmp;
4101   const Register Rflags = R3_tmp;
4102 
4103   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4104 
4105   // Rmethod: index
4106   // Rrecv:   receiver
4107   // Rflags:  flags
4108   // LR:      return address
4109 
4110   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4111 }
4112 
4113 
4114 void TemplateTable::invokespecial(int byte_no) {
4115   transition(vtos, vtos);
4116   assert(byte_no == f1_byte, "use this argument");
4117   const Register Rrecv  = R2_tmp;
4118   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4119   __ verify_oop(Rrecv);
4120   __ null_check(Rrecv, Rtemp);
4121   // do the call
4122   __ profile_call(Rrecv);
4123   __ jump_from_interpreted(Rmethod);
4124 }
4125 
4126 
4127 void TemplateTable::invokestatic(int byte_no) {
4128   transition(vtos, vtos);
4129   assert(byte_no == f1_byte, "use this argument");
4130   prepare_invoke(byte_no, Rmethod);
4131   // do the call
4132   __ profile_call(R2_tmp);
4133   __ jump_from_interpreted(Rmethod);
4134 }
4135 
4136 
4137 void TemplateTable::fast_invokevfinal(int byte_no) {
4138   transition(vtos, vtos);
4139   assert(byte_no == f2_byte, "use this argument");
4140   __ stop("fast_invokevfinal is not used on ARM");
4141 }
4142 
4143 
4144 void TemplateTable::invokeinterface(int byte_no) {
4145   transition(vtos, vtos);
4146   assert(byte_no == f1_byte, "use this argument");
4147 
4148   const Register Ritable = R1_tmp;
4149   const Register Rrecv   = R2_tmp;
4150   const Register Rinterf = R5_tmp;
4151   const Register Rindex  = R4_tmp;
4152   const Register Rflags  = R3_tmp;
4153   const Register Rklass  = R3_tmp;
4154 
4155   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4156 
  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCache.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
4161   Label notMethod;
4162   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4163 
4164   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4165   __ bind(notMethod);
4166 
4167   // Get receiver klass into Rklass - also a null check
4168   __ load_klass(Rklass, Rrecv);
4169 
4170   Label no_such_interface;
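  // Two itable lookups are performed below: the first (itable index == noreg)
  // only verifies that the receiver class implements the reference interface
  // from the CP entry; the second locates the Method* by itable index within
  // the interface that actually declares the method.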
4171 
4172   // Receiver subtype check against REFC.
4173   __ lookup_interface_method(// inputs: rec. class, interface
4174                              Rklass, Rinterf, noreg,
4175                              // outputs:  scan temp. reg1, scan temp. reg2
4176                              noreg, Ritable, Rtemp,
4177                              no_such_interface);
4178 
4179   // profile this call
4180   __ profile_virtual_call(R0_tmp, Rklass);
4181 
4182   // Get declaring interface class from method
4183   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4184   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4185   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4186 
4187   // Get itable index from method
4188   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4189   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
4190   __ neg(Rindex, Rtemp);
4191 
4192   __ lookup_interface_method(// inputs: rec. class, interface
4193                              Rklass, Rinterf, Rindex,
4194                              // outputs:  scan temp. reg1, scan temp. reg2
4195                              Rmethod, Ritable, Rtemp,
4196                              no_such_interface);
4197 
4198   // Rmethod: Method* to call
4199 
4200   // Check for abstract method error
4201   // Note: This should be done more efficiently via a throw_abstract_method_error
4202   //       interpreter entry point and a conditional jump to it in case of a null
4203   //       method.
4204   { Label L;
4205     __ cbnz(Rmethod, L);
4206     // throw exception
4207     // note: must restore interpreter registers to canonical
4208     //       state for exception handling to work correctly!
4209     __ restore_method();
4210     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4211     // the call_VM checks for exception, so we should never return here.
4212     __ should_not_reach_here();
4213     __ bind(L);
4214   }
4215 
4216   // do the call
4217   __ jump_from_interpreted(Rmethod);
4218 
4219   // throw exception
4220   __ bind(no_such_interface);
4221   __ restore_method();
4222   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4223   // the call_VM checks for exception, so we should never return here.
4224   __ should_not_reach_here();
4225 }
4226 
4227 void TemplateTable::invokehandle(int byte_no) {
4228   transition(vtos, vtos);
4229 
4230   // TODO-AARCH64 review register usage
4231   const Register Rrecv  = R2_tmp;
4232   const Register Rmtype = R4_tmp;
4233   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4234 
4235   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4236   __ null_check(Rrecv, Rtemp);
4237 
4238   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4239   // Rmethod: MH.invokeExact_MT method (from f2)
4240 
4241   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4242 
4243   // do the call
4244   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4245   __ mov(Rmethod, R5_method);
4246   __ jump_from_interpreted(Rmethod);
4247 }
4248 
4249 void TemplateTable::invokedynamic(int byte_no) {
4250   transition(vtos, vtos);
4251 
4252   // TODO-AARCH64 review register usage
4253   const Register Rcallsite = R4_tmp;
4254   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4255 
4256   prepare_invoke(byte_no, R5_method, Rcallsite);
4257 
4258   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4259   // Rmethod:   MH.linkToCallSite method (from f2)
4260 
4261   // Note:  Rcallsite is already pushed by prepare_invoke
4262 
4263   if (ProfileInterpreter) {
4264     __ profile_call(R2_tmp);
4265   }
4266 
4267   // do the call
4268   __ mov(Rmethod, R5_method);
4269   __ jump_from_interpreted(Rmethod);
4270 }
4271 
4272 //----------------------------------------------------------------------------------------------------
4273 // Allocation
4274 
4275 void TemplateTable::_new() {
4276   transition(vtos, atos);
4277 
4278   const Register Robj   = R0_tos;
4279   const Register Rcpool = R1_tmp;
4280   const Register Rindex = R2_tmp;
4281   const Register Rtags  = R3_tmp;
4282   const Register Rsize  = R3_tmp;
4283 
4284   Register Rklass = R4_tmp;
4285   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4286   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4287 
4288   Label slow_case;
4289   Label done;
4290   Label initialize_header;
4291   Label initialize_object;  // including clearing the fields
4292 
4293   const bool allow_shared_alloc =
4294     Universe::heap()->supports_inline_contig_alloc();
4295 
4296   // Literals
4297   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4298 
4299   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4300   __ get_cpool_and_tags(Rcpool, Rtags);
4301 
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put).
4305   const int tags_offset = Array<u1>::base_offset_in_bytes();
4306   __ add(Rtemp, Rtags, Rindex);
4307 
4308 #ifdef AARCH64
4309   __ add(Rtemp, Rtemp, tags_offset);
4310   __ ldarb(Rtemp, Rtemp);
4311 #else
4312   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4313 
4314   // use Rklass as a scratch
4315   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4316 #endif // AARCH64
4317 
4318   // get InstanceKlass
4319   __ cmp(Rtemp, JVM_CONSTANT_Class);
4320   __ b(slow_case, ne);
4321   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4322 
4323   // make sure klass is initialized & doesn't have finalizer
4324   // make sure klass is fully initialized
4325   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4326   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4327   __ b(slow_case, ne);
4328 
4329   // get instance_size in InstanceKlass (scaled to a count of bytes)
4330   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4331 
4332   // test to see if it has a finalizer or is malformed in some way
4333   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4334   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4335 
4336   // Allocate the instance:
4337   //  If TLAB is enabled:
4338   //    Try to allocate in the TLAB.
4339   //    If fails, go to the slow path.
4340   //  Else If inline contiguous allocations are enabled:
4341   //    Try to allocate in eden.
4342   //    If fails due to heap end, go to slow path.
4343   //
4344   //  If TLAB is enabled OR inline contiguous is enabled:
4345   //    Initialize the allocation.
4346   //    Exit.
4347   //
4348   //  Go to slow path.
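  // TLAB fast path in pseudocode (a sketch; ZeroTLAB handling and header
  // initialization follow below):
  //   obj     = thread->tlab_top;
  //   new_top = obj + instance_size_in_bytes;
  //   if (new_top > thread->tlab_end) goto slow_case;
  //   thread->tlab_top = new_top;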
4349   if (UseTLAB) {
4350     const Register Rtlab_top = R1_tmp;
4351     const Register Rtlab_end = R2_tmp;
4352     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4353 
4354     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4355     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4356     __ add(Rtlab_top, Robj, Rsize);
4357     __ cmp(Rtlab_top, Rtlab_end);
4358     __ b(slow_case, hi);
4359     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4360     if (ZeroTLAB) {
4361       // the fields have been already cleared
4362       __ b(initialize_header);
4363     } else {
4364       // initialize both the header and fields
4365       __ b(initialize_object);
4366     }
4367   } else {
4368     // Allocation in the shared Eden, if allowed.
4369     if (allow_shared_alloc) {
4370       const Register Rheap_top_addr = R2_tmp;
4371       const Register Rheap_top = R5_tmp;
4372       const Register Rheap_end = Rtemp;
4373       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4374 
      // Rheap_end is (re)loaded in the loop since it is also used as a scratch register in the CAS
4376       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4377 
4378       Label retry;
4379       __ bind(retry);
4380 
4381 #ifdef AARCH64
4382       __ ldxr(Robj, Rheap_top_addr);
4383 #else
4384       __ ldr(Robj, Address(Rheap_top_addr));
4385 #endif // AARCH64
4386 
4387       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4388       __ add(Rheap_top, Robj, Rsize);
4389       __ cmp(Rheap_top, Rheap_end);
4390       __ b(slow_case, hi);
4391 
4392       // Update heap top atomically.
4393       // If someone beats us on the allocation, try again, otherwise continue.
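      // In effect (a sketch of the retry loop):
      //   do {
      //     obj     = *heap_top;
      //     new_top = obj + instance_size_in_bytes;
      //     if (new_top > heap_end) goto slow_case;
      //   } while (!CAS(heap_top, obj, new_top));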
4394 #ifdef AARCH64
4395       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4396       __ cbnz_w(Rtemp2, retry);
4397 #else
4398       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4399       __ b(retry, ne);
4400 #endif // AARCH64
4401 
4402       __ incr_allocated_bytes(Rsize, Rtemp);
4403     }
4404   }
4405 
4406   if (UseTLAB || allow_shared_alloc) {
4407     const Register Rzero0 = R1_tmp;
4408     const Register Rzero1 = R2_tmp;
4409     const Register Rzero_end = R5_tmp;
4410     const Register Rzero_cur = Rtemp;
4411     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4412 
    // The object fields are initialized before the header.  If there are no
    // fields beyond the header, go directly to the header initialization.
4415     __ bind(initialize_object);
4416     __ subs(Rsize, Rsize, sizeof(oopDesc));
4417     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4418     __ b(initialize_header, eq);
4419 
4420 #ifdef ASSERT
4421     // make sure Rsize is a multiple of 8
4422     Label L;
4423     __ tst(Rsize, 0x07);
4424     __ b(L, eq);
4425     __ stop("object size is not multiple of 8 - adjust this code");
4426     __ bind(L);
4427 #endif
4428 
4429 #ifdef AARCH64
4430     {
4431       Label loop;
4432       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4433       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4434       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4435       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4436 
4437       // Zero by 2 words per iteration.
4438       __ bind(loop);
4439       __ subs(Rsize, Rsize, 2*wordSize);
4440       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4441       __ b(loop, gt);
4442     }
4443 #else
4444     __ mov(Rzero0, 0);
4445     __ mov(Rzero1, 0);
4446     __ add(Rzero_end, Rzero_cur, Rsize);
4447 
4448     // initialize remaining object fields: Rsize was a multiple of 8
4449     { Label loop;
4450       // loop is unrolled 2 times
4451       __ bind(loop);
4452       // #1
4453       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4454       __ cmp(Rzero_cur, Rzero_end);
4455       // #2
4456       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4457       __ cmp(Rzero_cur, Rzero_end, ne);
4458       __ b(loop, ne);
4459     }
4460 #endif // AARCH64
4461 
4462     // initialize object header only.
4463     __ bind(initialize_header);
4464     if (UseBiasedLocking) {
4465       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4466     } else {
4467       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4468     }
4469     // mark
4470     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4471 
4472     // klass
4473 #ifdef AARCH64
4474     __ store_klass_gap(Robj);
4475 #endif // AARCH64
4476     __ store_klass(Rklass, Robj); // blows Rklass:
4477     Rklass = noreg;
4478 
4479     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4480     if (DTraceAllocProbes) {
4481       // Trigger dtrace event for fastpath
4482       Label Lcontinue;
4483 
4484       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4485       __ cbz(Rtemp, Lcontinue);
4486 
4487       __ push(atos);
4488       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4489       __ pop(atos);
4490 
4491       __ bind(Lcontinue);
4492     }
4493 
4494     __ b(done);
4495   } else {
4496     // jump over literals
4497     __ b(slow_case);
4498   }
4499 
4500   if (allow_shared_alloc) {
4501     __ bind_literal(Lheap_top_addr);
4502   }
4503 
4504   // slow case
4505   __ bind(slow_case);
4506   __ get_constant_pool(Rcpool);
4507   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4508   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4509 
4510   // continue
4511   __ bind(done);
4512 
4513   // StoreStore barrier required after complete initialization
4514   // (headers + content zeroing), before the object may escape.
4515   __ membar(MacroAssembler::StoreStore, R1_tmp);
4516 }
4517 
4518 
4519 void TemplateTable::newarray() {
4520   transition(itos, atos);
4521   __ ldrb(R1, at_bcp(1));
4522   __ mov(R2, R0_tos);
4523   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4524   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4525 }
4526 
4527 
4528 void TemplateTable::anewarray() {
4529   transition(itos, atos);
4530   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4531   __ get_constant_pool(R1);
4532   __ mov(R3, R0_tos);
4533   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4534   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4535 }
4536 
4537 
4538 void TemplateTable::arraylength() {
4539   transition(atos, itos);
4540   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4541   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4542 }
4543 
4544 
4545 void TemplateTable::checkcast() {
4546   transition(atos, atos);
4547   Label done, is_null, quicked, resolved, throw_exception;
4548 
4549   const Register Robj = R0_tos;
4550   const Register Rcpool = R2_tmp;
4551   const Register Rtags = R3_tmp;
4552   const Register Rindex = R4_tmp;
4553   const Register Rsuper = R3_tmp;
4554   const Register Rsub   = R4_tmp;
4555   const Register Rsubtype_check_tmp1 = R1_tmp;
4556   const Register Rsubtype_check_tmp2 = LR_tmp;
4557 
4558   __ cbz(Robj, is_null);
4559 
4560   // Get cpool & tags index
4561   __ get_cpool_and_tags(Rcpool, Rtags);
4562   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4563 
4564   // See if bytecode has already been quicked
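  // A "quickened" checkcast has its CP entry already resolved to a class
  // (tag == JVM_CONSTANT_Class); otherwise the runtime (quicken_io_cc) is
  // called to resolve it and the klass is picked up from vm_result_2.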
4565   __ add(Rtemp, Rtags, Rindex);
4566 #ifdef AARCH64
4567   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4568   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4569   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4570 #else
4571   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4572 #endif // AARCH64
4573 
4574   __ cmp(Rtemp, JVM_CONSTANT_Class);
4575 
4576 #ifndef AARCH64
4577   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4578 #endif // !AARCH64
4579 
4580   __ b(quicked, eq);
4581 
4582   __ push(atos);
4583   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4584   // vm_result_2 has metadata result
4585   __ get_vm_result_2(Rsuper, Robj);
4586   __ pop_ptr(Robj);
4587   __ b(resolved);
4588 
4589   __ bind(throw_exception);
4590   // Come here on failure of subtype check
4591   __ profile_typecheck_failed(R1_tmp);
4592   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4593   __ b(Interpreter::_throw_ClassCastException_entry);
4594 
4595   // Get superklass in Rsuper and subklass in Rsub
4596   __ bind(quicked);
4597   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4598 
4599   __ bind(resolved);
4600   __ load_klass(Rsub, Robj);
4601 
4602   // Generate subtype check. Blows both tmps and Rtemp.
4603   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4604   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4605 
4606   // Come here on success
4607 
4608   // Collect counts on whether this check-cast sees NULLs a lot or not.
4609   if (ProfileInterpreter) {
4610     __ b(done);
4611     __ bind(is_null);
4612     __ profile_null_seen(R1_tmp);
4613   } else {
4614     __ bind(is_null);   // same as 'done'
4615   }
4616   __ bind(done);
4617 }
4618 
4619 
4620 void TemplateTable::instanceof() {
4621   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4622   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4623 
4624   transition(atos, itos);
4625   Label done, is_null, not_subtype, quicked, resolved;
4626 
4627   const Register Robj = R0_tos;
4628   const Register Rcpool = R2_tmp;
4629   const Register Rtags = R3_tmp;
4630   const Register Rindex = R4_tmp;
4631   const Register Rsuper = R3_tmp;
4632   const Register Rsub   = R4_tmp;
4633   const Register Rsubtype_check_tmp1 = R0_tmp;
4634   const Register Rsubtype_check_tmp2 = R1_tmp;
4635 
4636   __ cbz(Robj, is_null);
4637 
4638   __ load_klass(Rsub, Robj);
4639 
4640   // Get cpool & tags index
4641   __ get_cpool_and_tags(Rcpool, Rtags);
4642   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4643 
4644   // See if bytecode has already been quicked
4645   __ add(Rtemp, Rtags, Rindex);
4646 #ifdef AARCH64
4647   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4648   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4649   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4650 #else
4651   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4652 #endif // AARCH64
4653   __ cmp(Rtemp, JVM_CONSTANT_Class);
4654 
4655 #ifndef AARCH64
4656   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4657 #endif // !AARCH64
4658 
4659   __ b(quicked, eq);
4660 
4661   __ push(atos);
4662   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4663   // vm_result_2 has metadata result
4664   __ get_vm_result_2(Rsuper, Robj);
4665   __ pop_ptr(Robj);
4666   __ b(resolved);
4667 
4668   // Get superklass in Rsuper and subklass in Rsub
4669   __ bind(quicked);
4670   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4671 
4672   __ bind(resolved);
4673   __ load_klass(Rsub, Robj);
4674 
4675   // Generate subtype check. Blows both tmps and Rtemp.
4676   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4677 
4678   // Come here on success
4679   __ mov(R0_tos, 1);
4680   __ b(done);
4681 
4682   __ bind(not_subtype);
4683   // Come here on failure
4684   __ profile_typecheck_failed(R1_tmp);
4685   __ mov(R0_tos, 0);
4686 
4687   // Collect counts on whether this test sees NULLs a lot or not.
4688   if (ProfileInterpreter) {
4689     __ b(done);
4690     __ bind(is_null);
4691     __ profile_null_seen(R1_tmp);
4692   } else {
4693     __ bind(is_null);   // same as 'done'
4694   }
4695   __ bind(done);
4696 }
4697 
4698 
4699 //----------------------------------------------------------------------------------------------------
4700 // Breakpoints
4701 void TemplateTable::_breakpoint() {
4702 
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.
4706 
4707   transition(vtos, vtos);
4708 
4709   // get the unpatched byte code
4710   __ mov(R1, Rmethod);
4711   __ mov(R2, Rbcp);
4712   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4713 #ifdef AARCH64
4714   __ sxtw(Rtmp_save0, R0);
4715 #else
4716   __ mov(Rtmp_save0, R0);
4717 #endif // AARCH64
4718 
4719   // post the breakpoint event
4720   __ mov(R1, Rmethod);
4721   __ mov(R2, Rbcp);
4722   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4723 
4724   // complete the execution of original bytecode
4725   __ mov(R3_bytecode, Rtmp_save0);
4726   __ dispatch_only_normal(vtos);
4727 }
4728 
4729 
4730 //----------------------------------------------------------------------------------------------------
4731 // Exceptions
4732 
4733 void TemplateTable::athrow() {
4734   transition(atos, vtos);
4735   __ mov(Rexception_obj, R0_tos);
4736   __ null_check(Rexception_obj, Rtemp);
4737   __ b(Interpreter::throw_exception_entry());
4738 }
4739 
4740 
4741 //----------------------------------------------------------------------------------------------------
4742 // Synchronization
4743 //
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
4746 //
4747 // Stack layout:
4748 //
4749 // [expressions  ] <--- Rstack_top        = expression stack top
4750 // ..
4751 // [expressions  ]
4752 // [monitor entry] <--- monitor block top = expression stack bot
4753 // ..
4754 // [monitor entry]
4755 // [frame data   ] <--- monitor block bot
4756 // ...
4757 // [saved FP     ] <--- FP
4758 
4759 
4760 void TemplateTable::monitorenter() {
4761   transition(atos, vtos);
4762 
4763   const Register Robj = R0_tos;
4764   const Register Rentry = R1_tmp;
4765 
4766   // check for NULL object
4767   __ null_check(Robj, Rtemp);
4768 
4769   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4770   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4771   Label allocate_monitor, allocated;
4772 
4773   // initialize entry pointer
4774   __ mov(Rentry, 0);                             // points to free slot or NULL
4775 
4776   // find a free slot in the monitor block (result in Rentry)
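  // The loop below scans entries from the top-most monitor down to the block
  // bottom: the first unused entry (obj == NULL) is remembered in Rentry, and
  // the scan stops early if an entry for this same object is found
  // (recursive locking).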
4777   { Label loop, exit;
4778     const Register Rcur = R2_tmp;
4779     const Register Rcur_obj = Rtemp;
4780     const Register Rbottom = R3_tmp;
4781     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
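         // Roughly equivalent pseudo-code for the search below (sketch):
         //   entry = NULL;
         //   for (cur = monitor_block_top; cur != monitor_block_bottom; cur += entry_size) {
         //     if (cur->obj == NULL) entry = cur;    // remember a free slot
         //     if (cur->obj == obj)  break;          // existing entry for this object
         //   }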
4782 
4783     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4784                                  // points to current entry, starting with top-most entry
4785     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4786                                  // points to word before bottom of monitor block
4787 
4788     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4789 #ifndef AARCH64
4790     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4791                                                  // prefetch monitor's object for the first iteration
4792 #endif // !AARCH64
4793     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4794 
4795     __ bind(loop);
4796 #ifdef AARCH64
4797     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4798 #endif // AARCH64
4799     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4800     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4801 
4802     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4803     __ b(exit, eq);                              // if same object then stop searching
4804 
4805     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4806 
4807     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4808 #ifndef AARCH64
4809     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4810                                                  // prefetch monitor's object for the next iteration
4811 #endif // !AARCH64
4812     __ b(loop, ne);                              // if not at bottom then check this entry
4813     __ bind(exit);
4814   }
4815 
4816   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4817 
4818   __ bind(allocate_monitor);
4819 
4820   // allocate one if there's no free slot
4821   { Label loop;
4822     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4823 
4824     // 1. compute new pointers
4825 
4826 #ifdef AARCH64
4827     __ check_extended_sp(Rtemp);
4828     __ sub(SP, SP, entry_size);                  // adjust extended SP
4829     __ mov(Rtemp, SP);
4830     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4831 #endif // AARCH64
4832 
4833     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4834                                                  // old monitor block top / expression stack bottom
4835 
4836     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4837     __ check_stack_top_on_expansion();
4838 
4839     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4840 
4841     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4842 
4843     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4844                                                  // set new monitor block top
4845 
4846     // 2. move expression stack contents
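         // Roughly equivalent pseudo-code for the copy (sketch):
         //   for (p = new_stack_top; p != new_monitor_block_top; p += wordSize) {
         //     *p = *(p + entry_size);   // load from old (higher) address, store at new one
         //   }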
4847 
4848     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4849 #ifndef AARCH64
4850     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4851 #endif // !AARCH64
4852     __ b(allocated, eq);
4853 
4854     __ bind(loop);
4855 #ifdef AARCH64
4856     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4857 #endif // AARCH64
4858     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4859                                                             // and advance to next word
4860     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4861 #ifndef AARCH64
4862     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4863 #endif // !AARCH64
4864     __ b(loop, ne);                                         // if not at bottom then copy next word
4865   }
4866 
4867   // call run-time routine
4868 
4869   // Rentry: points to monitor entry
4870   __ bind(allocated);
4871 
4872   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4873   // The object has already been popped from the stack, so the expression stack looks correct.
4874   __ add(Rbcp, Rbcp, 1);
4875 
4876   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4877   __ lock_object(Rentry);
4878 
4879   // check to make sure this monitor doesn't cause stack overflow after locking
4880   __ save_bcp();  // in case of exception
4881   __ arm_stack_overflow_check(0, Rtemp);
4882 
4883   // The bcp has already been incremented. Just need to dispatch to next instruction.
4884   __ dispatch_next(vtos);
4885 }
4886 
4887 
4888 void TemplateTable::monitorexit() {
4889   transition(atos, vtos);
4890 
4891   const Register Robj = R0_tos;
4892   const Register Rcur = R1_tmp;
4893   const Register Rbottom = R2_tmp;
4894   const Register Rcur_obj = Rtemp;
4895 
4896   // check for NULL object
4897   __ null_check(Robj, Rtemp);
4898 
4899   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4900   Label found, throw_exception;
4901 
4902   // find matching slot
4903   { Label loop;
4904     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
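         // Roughly equivalent pseudo-code for the search below (sketch):
         //   for (cur = monitor_block_top; cur != monitor_block_bottom; cur += entry_size) {
         //     if (cur->obj == obj) goto found;
         //   }
         //   throw IllegalMonitorStateException;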
4905 
4906     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4907                                  // points to current entry, starting with top-most entry
4908     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4909                                  // points to word before bottom of monitor block
4910 
4911     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4912 #ifndef AARCH64
4913     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4914                                                  // prefetch monitor's object for the first iteration
4915 #endif // !AARCH64
4916     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4917 
4918     __ bind(loop);
4919 #ifdef AARCH64
4920     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4921 #endif // AARCH64
4922     // check if current entry is for same object
4923     __ cmp(Rcur_obj, Robj);
4924     __ b(found, eq);                             // if same object then stop searching
4925     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4926     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4927 #ifndef AARCH64
4928     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4929 #endif // !AARCH64
4930     __ b (loop, ne);                             // if not at bottom then check this entry
4931   }
4932 
4933   // Error handling: unlocking was not block-structured
4934   __ bind(throw_exception);
4935   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4936   __ should_not_reach_here();
4937 
4938   // call run-time routine
4939   // Rcur: points to monitor entry
4940   __ bind(found);
4941   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4942   __ unlock_object(Rcur);
4943   __ pop_ptr(Robj);                              // discard object
4944 }
4945 
4946 
4947 //----------------------------------------------------------------------------------------------------
4948 // Wide instructions
4949 
4950 void TemplateTable::wide() {
4951   transition(vtos, vtos);
4952   __ ldrb(R3_bytecode, at_bcp(1));
4953 
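       // Dispatch through the table of wide entry points:
       //   target = Interpreter::_wentry_point[bytecode]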
4954   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4955   __ ldr_literal(Rtemp, Ltable);
4956   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4957 
4958   __ nop(); // to avoid filling the CPU pipeline with invalid instructions
4959   __ nop();
4960   __ bind_literal(Ltable);
4961 }
4962 
4963 
4964 //----------------------------------------------------------------------------------------------------
4965 // Multi arrays
4966 
4967 void TemplateTable::multianewarray() {
4968   transition(vtos, atos);
4969   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4970 
4971   // The last dimension is on top of the stack; we want the address of the first one:
4972   //   first_addr = last_addr + ndims * stackElementSize - 1*wordSize
4973   // Subtracting the final wordSize points R1 at the beginning of the dimensions array.
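       // Worked example (assuming stackElementSize == wordSize): for ndims == 2,
       //   R1 = Rstack_top + 2*stackElementSize - wordSize = Rstack_top + wordSize,
       // i.e. the second-from-top stack slot, which holds the first dimension.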
4974   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4975   __ sub(R1, Rtemp, wordSize);
4976 
4977   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4978   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4979   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4980 }