1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "interpreter/interp_masm.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/templateTable.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/cpCache.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 
  42 #define __ _masm->
  43 
  44 //----------------------------------------------------------------------------------------------------
  45 // Platform-dependent initialization
  46 
  47 void TemplateTable::pd_initialize() {
  48   // No ARM-specific initialization
  49 }
  50 
  51 //----------------------------------------------------------------------------------------------------
  52 // Address computation
  53 
  54 // local variables
  55 static inline Address iaddress(int n)            {
  56   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  57 }
  58 
  59 static inline Address laddress(int n)            { return iaddress(n + 1); }
  60 #ifndef AARCH64
  61 static inline Address haddress(int n)            { return iaddress(n + 0); }
  62 #endif // !AARCH64
  63 
  64 static inline Address faddress(int n)            { return iaddress(n); }
  65 static inline Address daddress(int n)            { return laddress(n); }
  66 static inline Address aaddress(int n)            { return iaddress(n); }
  67 
  68 
  69 void TemplateTable::get_local_base_addr(Register r, Register index) {
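       // Locals live at decreasing addresses starting at Rlocals (local #0), so the
       // base address of local #index is Rlocals - index * stackElementSize.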
  70   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  71 }
  72 
  73 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  74 #ifdef AARCH64
  75   get_local_base_addr(scratch, index);
  76   return Address(scratch);
  77 #else
  78   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  79 #endif // AARCH64
  80 }
  81 
  82 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  83   return load_iaddress(index, scratch);
  84 }
  85 
  86 Address TemplateTable::load_faddress(Register index, Register scratch) {
  87 #ifdef __SOFTFP__
  88   return load_iaddress(index, scratch);
  89 #else
  90   get_local_base_addr(scratch, index);
  91   return Address(scratch);
  92 #endif // __SOFTFP__
  93 }
  94 
  95 Address TemplateTable::load_daddress(Register index, Register scratch) {
  96   get_local_base_addr(scratch, index);
  97   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  98 }
  99 
 100 // At top of the Java expression stack, which may be different from SP.
 101 // It isn't different for category 1 values.
 102 static inline Address at_tos() {
 103   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 104 }
 105 
 106 static inline Address at_tos_p1() {
 107   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 108 }
 109 
 110 static inline Address at_tos_p2() {
 111   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 112 }
 113 
 114 
 115 // 32-bit ARM:
 116 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 117 // separate ldr instructions (supports nonadjacent values).
 118 // Used for longs in all modes, and for doubles in SOFTFP mode.
 119 //
 120 // AArch64: loads long local into R0_tos.
 121 //
 122 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 123   const Register Rlocal_base = tmp;
 124   assert_different_registers(Rlocal_index, tmp);
 125 
 126   get_local_base_addr(Rlocal_base, Rlocal_index);
 127 #ifdef AARCH64
 128   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 129 #else
 130   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 131   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 132 #endif // AARCH64
 133 }
 134 
 135 
 136 // 32-bit ARM:
 137 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 138 // separate str instructions (supports nonadjacent values).
 139 // Used for longs in all modes, and for doubles in SOFTFP mode.
 140 //
 141 // AArch64: stores R0_tos to long local.
 142 //
 143 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 144   const Register Rlocal_base = tmp;
 145   assert_different_registers(Rlocal_index, tmp);
 146 
 147   get_local_base_addr(Rlocal_base, Rlocal_index);
 148 #ifdef AARCH64
 149   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 150 #else
 151   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 152   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 153 #endif // AARCH64
 154 }
 155 
 156 // Returns address of Java array element using temp register as address base.
 157 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 158   int logElemSize = exact_log2(type2aelembytes(elemType));
 159   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 160   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 161 }
 162 
 163 //----------------------------------------------------------------------------------------------------
 164 // Condition conversion
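     // Maps a bytecode comparison condition to the ARM condition code of its
     // negation (e.g. TemplateTable::equal -> ne); typically used to branch to
     // the not-taken path.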
 165 AsmCondition convNegCond(TemplateTable::Condition cc) {
 166   switch (cc) {
 167     case TemplateTable::equal        : return ne;
 168     case TemplateTable::not_equal    : return eq;
 169     case TemplateTable::less         : return ge;
 170     case TemplateTable::less_equal   : return gt;
 171     case TemplateTable::greater      : return le;
 172     case TemplateTable::greater_equal: return lt;
 173   }
 174   ShouldNotReachHere();
 175   return nv;
 176 }
 177 
 178 //----------------------------------------------------------------------------------------------------
 179 // Miscellaneous helper routines
 180 
 181 // Store an oop (or NULL) at the address described by obj.
 182 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 183 // Also destroys new_val and obj.base().
 184 static void do_oop_store(InterpreterMacroAssembler* _masm,
 185                          Address obj,
 186                          Register new_val,
 187                          Register tmp1,
 188                          Register tmp2,
 189                          Register tmp3,
 190                          BarrierSet::Name barrier,
 191                          bool precise,
 192                          bool is_null) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   switch (barrier) {
 196 #if INCLUDE_ALL_GCS
 197     case BarrierSet::G1BarrierSet:
 198       {
 199         // flatten object address if needed
 200         assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
 201 
 202         const Register store_addr = obj.base();
 203         if (obj.index() != noreg) {
 204           assert (obj.disp() == 0, "index or displacement, not both");
 205 #ifdef AARCH64
 206           __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
 207 #else
 208           assert(obj.offset_op() == add_offset, "addition is expected");
 209           __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
 210 #endif // AARCH64
 211         } else if (obj.disp() != 0) {
 212           __ add(store_addr, obj.base(), obj.disp());
 213         }
 214 
 215         __ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3);
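             // Note: the G1 pre-barrier above records the previous value at store_addr
             // (SATB); the post-barrier below dirties the card when the store creates
             // a cross-region reference.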
 216         if (is_null) {
 217           __ store_heap_oop_null(new_val, Address(store_addr));
 218         } else {
 219           // G1 barrier needs uncompressed oop for region cross check.
 220           Register val_to_store = new_val;
 221           if (UseCompressedOops) {
 222             val_to_store = tmp1;
 223             __ mov(val_to_store, new_val);
 224           }
 225           __ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store:
 226           val_to_store = noreg;
 227           __ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3);
 228         }
 229       }
 230       break;
 231 #endif // INCLUDE_ALL_GCS
 232     case BarrierSet::CardTableBarrierSet:
 233       {
 234         if (is_null) {
 235           __ store_heap_oop_null(new_val, obj);
 236         } else {
 237           assert (!precise || (obj.index() == noreg && obj.disp() == 0),
 238                   "store check address should be calculated beforehand");
 239 
 240           __ store_check_part1(tmp1);
 241           __ store_heap_oop(new_val, obj); // blows new_val:
 242           new_val = noreg;
 243           __ store_check_part2(obj.base(), tmp1, tmp2);
 244         }
 245       }
 246       break;
 247     case BarrierSet::ModRef:
 248       ShouldNotReachHere();
 249       break;
 250     default:
 251       ShouldNotReachHere();
 252       break;
 253   }
 254 }
 255 
 256 Address TemplateTable::at_bcp(int offset) {
 257   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 258   return Address(Rbcp, offset);
 259 }
 260 
 261 
 262 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 263 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 264                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 265                                    int byte_no) {
 266   assert_different_registers(bc_reg, temp_reg);
 267   if (!RewriteBytecodes)  return;
 268   Label L_patch_done;
 269 
 270   switch (bc) {
 271   case Bytecodes::_fast_aputfield:
 272   case Bytecodes::_fast_bputfield:
 273   case Bytecodes::_fast_zputfield:
 274   case Bytecodes::_fast_cputfield:
 275   case Bytecodes::_fast_dputfield:
 276   case Bytecodes::_fast_fputfield:
 277   case Bytecodes::_fast_iputfield:
 278   case Bytecodes::_fast_lputfield:
 279   case Bytecodes::_fast_sputfield:
 280     {
 281       // We skip bytecode quickening for putfield instructions when
 282       // the put_code written to the constant pool cache is zero.
 283       // This is required so that every execution of this instruction
 284       // calls out to InterpreterRuntime::resolve_get_put to do
 285       // additional, required work.
 286       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 287       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 288       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 289       __ mov(bc_reg, bc);
 290       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 291     }
 292     break;
 293   default:
 294     assert(byte_no == -1, "sanity");
 295     // the pair bytecodes have already done the load.
 296     if (load_bc_into_bc_reg) {
 297       __ mov(bc_reg, bc);
 298     }
 299   }
 300 
 301   if (__ can_post_breakpoint()) {
 302     Label L_fast_patch;
 303     // if a breakpoint is present we can't rewrite the stream directly
 304     __ ldrb(temp_reg, at_bcp(0));
 305     __ cmp(temp_reg, Bytecodes::_breakpoint);
 306     __ b(L_fast_patch, ne);
 307     if (bc_reg != R3) {
 308       __ mov(R3, bc_reg);
 309     }
 310     __ mov(R1, Rmethod);
 311     __ mov(R2, Rbcp);
 312     // Let breakpoint table handling rewrite to quicker bytecode
 313     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 314     __ b(L_patch_done);
 315     __ bind(L_fast_patch);
 316   }
 317 
 318 #ifdef ASSERT
 319   Label L_okay;
 320   __ ldrb(temp_reg, at_bcp(0));
 321   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 322   __ b(L_okay, eq);
 323   __ cmp(temp_reg, bc_reg);
 324   __ b(L_okay, eq);
 325   __ stop("patching the wrong bytecode");
 326   __ bind(L_okay);
 327 #endif
 328 
 329   // patch bytecode
 330   __ strb(bc_reg, at_bcp(0));
 331   __ bind(L_patch_done);
 332 }
 333 
 334 //----------------------------------------------------------------------------------------------------
 335 // Individual instructions
 336 
 337 void TemplateTable::nop() {
 338   transition(vtos, vtos);
 339   // nothing to do
 340 }
 341 
 342 void TemplateTable::shouldnotreachhere() {
 343   transition(vtos, vtos);
 344   __ stop("shouldnotreachhere bytecode");
 345 }
 346 
 347 
 348 
 349 void TemplateTable::aconst_null() {
 350   transition(vtos, atos);
 351   __ mov(R0_tos, 0);
 352 }
 353 
 354 
 355 void TemplateTable::iconst(int value) {
 356   transition(vtos, itos);
 357   __ mov_slow(R0_tos, value);
 358 }
 359 
 360 
 361 void TemplateTable::lconst(int value) {
 362   transition(vtos, ltos);
 363   assert((value == 0) || (value == 1), "unexpected long constant");
 364   __ mov(R0_tos, value);
 365 #ifndef AARCH64
 366   __ mov(R1_tos_hi, 0);
 367 #endif // !AARCH64
 368 }
 369 
 370 
 371 void TemplateTable::fconst(int value) {
 372   transition(vtos, ftos);
 373 #ifdef AARCH64
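       // fmov with an 8-bit floating-point immediate: 0x70 expands to 1.0f,
       // 0x00 to 2.0f.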
 374   switch(value) {
 375   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 376   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 377   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 378   default:  ShouldNotReachHere();      break;
 379   }
 380 #else
 381   const int zero = 0;         // 0.0f
 382   const int one = 0x3f800000; // 1.0f
 383   const int two = 0x40000000; // 2.0f
 384 
 385   switch(value) {
 386   case 0:   __ mov(R0_tos, zero);   break;
 387   case 1:   __ mov(R0_tos, one);    break;
 388   case 2:   __ mov(R0_tos, two);    break;
 389   default:  ShouldNotReachHere();   break;
 390   }
 391 
 392 #ifndef __SOFTFP__
 393   __ fmsr(S0_tos, R0_tos);
 394 #endif // !__SOFTFP__
 395 #endif // AARCH64
 396 }
 397 
 398 
 399 void TemplateTable::dconst(int value) {
 400   transition(vtos, dtos);
 401 #ifdef AARCH64
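       // 0x70 is the 8-bit fmov immediate encoding of 1.0.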
 402   switch(value) {
 403   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 404   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 405   default:  ShouldNotReachHere();      break;
 406   }
 407 #else
 408   const int one_lo = 0;            // low part of 1.0
 409   const int one_hi = 0x3ff00000;   // high part of 1.0
 410 
 411   if (value == 0) {
 412 #ifdef __SOFTFP__
 413     __ mov(R0_tos_lo, 0);
 414     __ mov(R1_tos_hi, 0);
 415 #else
 416     __ mov(R0_tmp, 0);
 417     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 418 #endif // __SOFTFP__
 419   } else if (value == 1) {
 420     __ mov(R0_tos_lo, one_lo);
 421     __ mov_slow(R1_tos_hi, one_hi);
 422 #ifndef __SOFTFP__
 423     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 424 #endif // !__SOFTFP__
 425   } else {
 426     ShouldNotReachHere();
 427   }
 428 #endif // AARCH64
 429 }
 430 
 431 
 432 void TemplateTable::bipush() {
 433   transition(vtos, itos);
 434   __ ldrsb(R0_tos, at_bcp(1));
 435 }
 436 
 437 
 438 void TemplateTable::sipush() {
 439   transition(vtos, itos);
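       // Assemble the signed 16-bit operand: the high byte at bcp+1 is
       // sign-extended, the low byte at bcp+2 is zero-extended, and they are
       // combined with a shift-by-8 orr.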
 440   __ ldrsb(R0_tmp, at_bcp(1));
 441   __ ldrb(R1_tmp, at_bcp(2));
 442   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 443 }
 444 
 445 
 446 void TemplateTable::ldc(bool wide) {
 447   transition(vtos, vtos);
 448   Label fastCase, Done;
 449 
 450   const Register Rindex = R1_tmp;
 451   const Register Rcpool = R2_tmp;
 452   const Register Rtags  = R3_tmp;
 453   const Register RtagType = R3_tmp;
 454 
 455   if (wide) {
 456     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 457   } else {
 458     __ ldrb(Rindex, at_bcp(1));
 459   }
 460   __ get_cpool_and_tags(Rcpool, Rtags);
 461 
 462   const int base_offset = ConstantPool::header_size() * wordSize;
 463   const int tags_offset = Array<u1>::base_offset_in_bytes();
 464 
 465   // get const type
 466   __ add(Rtemp, Rtags, tags_offset);
 467 #ifdef AARCH64
 468   __ add(Rtemp, Rtemp, Rindex);
 469   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here, or whether a control dependency is enough
 470 #else
 471   __ ldrb(RtagType, Address(Rtemp, Rindex));
 472   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 473 #endif // AARCH64
 474 
 475   // unresolved class - get the resolved class
 476   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 477 
 478   // unresolved class in error (resolution failed) - call into runtime
 479   // so that the same error from first resolution attempt is thrown.
 480 #ifdef AARCH64
 481   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 482   __ cond_cmp(RtagType, Rtemp, ne);
 483 #else
 484   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 485 #endif // AARCH64
 486 
 487   // resolved class - need to call vm to get java mirror of the class
 488   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
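       // The chained cond_cmp's above leave "eq" set if the tag matched any of
       // the three class tags; take the fast path only if none of them matched.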
 489 
 490   __ b(fastCase, ne);
 491 
 492   // slow case - call runtime
 493   __ mov(R1, wide);
 494   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 495   __ push(atos);
 496   __ b(Done);
 497 
 498   // int, float
 499   __ bind(fastCase);
 500 #ifdef ASSERT
 501   { Label L;
 502     __ cmp(RtagType, JVM_CONSTANT_Integer);
 503     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 504     __ b(L, eq);
 505     __ stop("unexpected tag type in ldc");
 506     __ bind(L);
 507   }
 508 #endif // ASSERT
 509   // itos, ftos
 510   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 511   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 512 
 513   // floats and ints are placed on the stack in the same way, so
 514   // we can use push(itos) to transfer the float value without VFP
 515   __ push(itos);
 516   __ bind(Done);
 517 }
 518 
 519 // Fast path for caching oop constants.
 520 void TemplateTable::fast_aldc(bool wide) {
 521   transition(vtos, atos);
 522   int index_size = wide ? sizeof(u2) : sizeof(u1);
 523   Label resolved;
 524 
 525   // We are resolved if the resolved reference cache entry contains a
 526   // non-null object (CallSite, etc.)
 527   assert_different_registers(R0_tos, R2_tmp);
 528   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 529   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 530   __ cbnz(R0_tos, resolved);
 531 
 532   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 533 
 534   // first time invocation - must resolve first
 535   __ mov(R1, (int)bytecode());
 536   __ call_VM(R0_tos, entry, R1);
 537   __ bind(resolved);
 538 
 539   if (VerifyOops) {
 540     __ verify_oop(R0_tos);
 541   }
 542 }
 543 
 544 void TemplateTable::ldc2_w() {
 545   transition(vtos, vtos);
 546   const Register Rtags  = R2_tmp;
 547   const Register Rindex = R3_tmp;
 548   const Register Rcpool = R4_tmp;
 549   const Register Rbase  = R5_tmp;
 550 
 551   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 552 
 553   __ get_cpool_and_tags(Rcpool, Rtags);
 554   const int base_offset = ConstantPool::header_size() * wordSize;
 555   const int tags_offset = Array<u1>::base_offset_in_bytes();
 556 
 557   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 558 
 559 #ifdef __ABI_HARD__
 560   Label Long, exit;
 561   // get type from tags
 562   __ add(Rtemp, Rtags, tags_offset);
 563   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 564   __ cmp(Rtemp, JVM_CONSTANT_Double);
 565   __ b(Long, ne);
 566   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 567 
 568   __ push(dtos);
 569   __ b(exit);
 570   __ bind(Long);
 571 #endif
 572 
 573 #ifdef AARCH64
 574   __ ldr(R0_tos, Address(Rbase, base_offset));
 575 #else
 576   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 577   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 578 #endif // AARCH64
 579   __ push(ltos);
 580 
 581 #ifdef __ABI_HARD__
 582   __ bind(exit);
 583 #endif
 584 }
 585 
 586 
 587 void TemplateTable::locals_index(Register reg, int offset) {
 588   __ ldrb(reg, at_bcp(offset));
 589 }
 590 
 591 void TemplateTable::iload() {
 592   iload_internal();
 593 }
 594 
 595 void TemplateTable::nofast_iload() {
 596   iload_internal(may_not_rewrite);
 597 }
 598 
 599 void TemplateTable::iload_internal(RewriteControl rc) {
 600   transition(vtos, itos);
 601 
 602   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 603     Label rewrite, done;
 604     const Register next_bytecode = R1_tmp;
 605     const Register target_bytecode = R2_tmp;
 606 
 607     // get next byte
 608     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 609     // If the next bytecode is _iload, wait to rewrite this one to _fast_iload2:
 610     // we only want to rewrite the last two iloads in a pair.  If the next
 611     // bytecode is already _fast_iload, the bytecode after it is neither an
 612     // iload nor a caload, so these two form the final iload pair.
 613     __ cmp(next_bytecode, Bytecodes::_iload);
 614     __ b(done, eq);
 615 
 616     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 617     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 618     __ b(rewrite, eq);
 619 
 620     // if _caload, rewrite to fast_icaload
 621     __ cmp(next_bytecode, Bytecodes::_caload);
 622     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 623     __ b(rewrite, eq);
 624 
 625     // rewrite so iload doesn't check again.
 626     __ mov(target_bytecode, Bytecodes::_fast_iload);
 627 
 628     // rewrite
 629     // R2: fast bytecode
 630     __ bind(rewrite);
 631     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 632     __ bind(done);
 633   }
 634 
 635   // Get the local value into tos
 636   const Register Rlocal_index = R1_tmp;
 637   locals_index(Rlocal_index);
 638   Address local = load_iaddress(Rlocal_index, Rtemp);
 639   __ ldr_s32(R0_tos, local);
 640 }
 641 
 642 
 643 void TemplateTable::fast_iload2() {
 644   transition(vtos, itos);
 645   const Register Rlocal_index = R1_tmp;
 646 
 647   locals_index(Rlocal_index);
 648   Address local = load_iaddress(Rlocal_index, Rtemp);
 649   __ ldr_s32(R0_tos, local);
 650   __ push(itos);
 651 
 652   locals_index(Rlocal_index, 3);
 653   local = load_iaddress(Rlocal_index, Rtemp);
 654   __ ldr_s32(R0_tos, local);
 655 }
 656 
 657 void TemplateTable::fast_iload() {
 658   transition(vtos, itos);
 659   const Register Rlocal_index = R1_tmp;
 660 
 661   locals_index(Rlocal_index);
 662   Address local = load_iaddress(Rlocal_index, Rtemp);
 663   __ ldr_s32(R0_tos, local);
 664 }
 665 
 666 
 667 void TemplateTable::lload() {
 668   transition(vtos, ltos);
 669   const Register Rlocal_index = R2_tmp;
 670 
 671   locals_index(Rlocal_index);
 672   load_category2_local(Rlocal_index, R3_tmp);
 673 }
 674 
 675 
 676 void TemplateTable::fload() {
 677   transition(vtos, ftos);
 678   const Register Rlocal_index = R2_tmp;
 679 
 680   // Get the local value into tos
 681   locals_index(Rlocal_index);
 682   Address local = load_faddress(Rlocal_index, Rtemp);
 683 #ifdef __SOFTFP__
 684   __ ldr(R0_tos, local);
 685 #else
 686   __ ldr_float(S0_tos, local);
 687 #endif // __SOFTFP__
 688 }
 689 
 690 
 691 void TemplateTable::dload() {
 692   transition(vtos, dtos);
 693   const Register Rlocal_index = R2_tmp;
 694 
 695   locals_index(Rlocal_index);
 696 
 697 #ifdef __SOFTFP__
 698   load_category2_local(Rlocal_index, R3_tmp);
 699 #else
 700   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 701 #endif // __SOFTFP__
 702 }
 703 
 704 
 705 void TemplateTable::aload() {
 706   transition(vtos, atos);
 707   const Register Rlocal_index = R1_tmp;
 708 
 709   locals_index(Rlocal_index);
 710   Address local = load_aaddress(Rlocal_index, Rtemp);
 711   __ ldr(R0_tos, local);
 712 }
 713 
 714 
 715 void TemplateTable::locals_index_wide(Register reg) {
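       // The wide local index is a 16-bit unsigned operand stored big-endian in
       // the bytecode stream: high byte at bcp+2, low byte at bcp+3.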
 716   assert_different_registers(reg, Rtemp);
 717   __ ldrb(Rtemp, at_bcp(2));
 718   __ ldrb(reg, at_bcp(3));
 719   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 720 }
 721 
 722 
 723 void TemplateTable::wide_iload() {
 724   transition(vtos, itos);
 725   const Register Rlocal_index = R2_tmp;
 726 
 727   locals_index_wide(Rlocal_index);
 728   Address local = load_iaddress(Rlocal_index, Rtemp);
 729   __ ldr_s32(R0_tos, local);
 730 }
 731 
 732 
 733 void TemplateTable::wide_lload() {
 734   transition(vtos, ltos);
 735   const Register Rlocal_index = R2_tmp;
 736   const Register Rlocal_base = R3_tmp;
 737 
 738   locals_index_wide(Rlocal_index);
 739   load_category2_local(Rlocal_index, R3_tmp);
 740 }
 741 
 742 
 743 void TemplateTable::wide_fload() {
 744   transition(vtos, ftos);
 745   const Register Rlocal_index = R2_tmp;
 746 
 747   locals_index_wide(Rlocal_index);
 748   Address local = load_faddress(Rlocal_index, Rtemp);
 749 #ifdef __SOFTFP__
 750   __ ldr(R0_tos, local);
 751 #else
 752   __ ldr_float(S0_tos, local);
 753 #endif // __SOFTFP__
 754 }
 755 
 756 
 757 void TemplateTable::wide_dload() {
 758   transition(vtos, dtos);
 759   const Register Rlocal_index = R2_tmp;
 760 
 761   locals_index_wide(Rlocal_index);
 762 #ifdef __SOFTFP__
 763   load_category2_local(Rlocal_index, R3_tmp);
 764 #else
 765   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 766 #endif // __SOFTFP__
 767 }
 768 
 769 
 770 void TemplateTable::wide_aload() {
 771   transition(vtos, atos);
 772   const Register Rlocal_index = R2_tmp;
 773 
 774   locals_index_wide(Rlocal_index);
 775   Address local = load_aaddress(Rlocal_index, Rtemp);
 776   __ ldr(R0_tos, local);
 777 }
 778 
 779 void TemplateTable::index_check(Register array, Register index) {
 780   // Pop ptr into array
 781   __ pop_ptr(array);
 782   index_check_without_pop(array, index);
 783 }
 784 
 785 void TemplateTable::index_check_without_pop(Register array, Register index) {
 786   assert_different_registers(array, index, Rtemp);
 787   // check array
 788   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 789   // check index
 790   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 791   __ cmp_32(index, Rtemp);
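       // Unsigned comparison: "hs" (index >= length) also catches negative
       // indices, which compare as large unsigned values.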
 792   if (index != R4_ArrayIndexOutOfBounds_index) {
 793     // convention with generate_ArrayIndexOutOfBounds_handler()
 794     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 795   }
 796   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 797 }
 798 
 799 
 800 void TemplateTable::iaload() {
 801   transition(itos, itos);
 802   const Register Rarray = R1_tmp;
 803   const Register Rindex = R0_tos;
 804 
 805   index_check(Rarray, Rindex);
 806   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 807 }
 808 
 809 
 810 void TemplateTable::laload() {
 811   transition(itos, ltos);
 812   const Register Rarray = R1_tmp;
 813   const Register Rindex = R0_tos;
 814 
 815   index_check(Rarray, Rindex);
 816 
 817 #ifdef AARCH64
 818   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 819 #else
 820   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 821   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 822   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 823 #endif // AARCH64
 824 }
 825 
 826 
 827 void TemplateTable::faload() {
 828   transition(itos, ftos);
 829   const Register Rarray = R1_tmp;
 830   const Register Rindex = R0_tos;
 831 
 832   index_check(Rarray, Rindex);
 833 
 834   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 835 #ifdef __SOFTFP__
 836   __ ldr(R0_tos, addr);
 837 #else
 838   __ ldr_float(S0_tos, addr);
 839 #endif // __SOFTFP__
 840 }
 841 
 842 
 843 void TemplateTable::daload() {
 844   transition(itos, dtos);
 845   const Register Rarray = R1_tmp;
 846   const Register Rindex = R0_tos;
 847 
 848   index_check(Rarray, Rindex);
 849 
 850 #ifdef __SOFTFP__
 851   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 852   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 853   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 854 #else
 855   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 856 #endif // __SOFTFP__
 857 }
 858 
 859 
 860 void TemplateTable::aaload() {
 861   transition(itos, atos);
 862   const Register Rarray = R1_tmp;
 863   const Register Rindex = R0_tos;
 864 
 865   index_check(Rarray, Rindex);
 866   __ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp));
 867 }
 868 
 869 
 870 void TemplateTable::baload() {
 871   transition(itos, itos);
 872   const Register Rarray = R1_tmp;
 873   const Register Rindex = R0_tos;
 874 
 875   index_check(Rarray, Rindex);
 876   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 877 }
 878 
 879 
 880 void TemplateTable::caload() {
 881   transition(itos, itos);
 882   const Register Rarray = R1_tmp;
 883   const Register Rindex = R0_tos;
 884 
 885   index_check(Rarray, Rindex);
 886   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 887 }
 888 
 889 
 890 // iload followed by caload frequent pair
 891 void TemplateTable::fast_icaload() {
 892   transition(vtos, itos);
 893   const Register Rlocal_index = R1_tmp;
 894   const Register Rarray = R1_tmp;
 895   const Register Rindex = R4_tmp; // index_check prefers index in R4
 896   assert_different_registers(Rlocal_index, Rindex);
 897   assert_different_registers(Rarray, Rindex);
 898 
 899   // load index out of locals
 900   locals_index(Rlocal_index);
 901   Address local = load_iaddress(Rlocal_index, Rtemp);
 902   __ ldr_s32(Rindex, local);
 903 
 904   // get array element
 905   index_check(Rarray, Rindex);
 906   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 907 }
 908 
 909 
 910 void TemplateTable::saload() {
 911   transition(itos, itos);
 912   const Register Rarray = R1_tmp;
 913   const Register Rindex = R0_tos;
 914 
 915   index_check(Rarray, Rindex);
 916   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 917 }
 918 
 919 
 920 void TemplateTable::iload(int n) {
 921   transition(vtos, itos);
 922   __ ldr_s32(R0_tos, iaddress(n));
 923 }
 924 
 925 
 926 void TemplateTable::lload(int n) {
 927   transition(vtos, ltos);
 928 #ifdef AARCH64
 929   __ ldr(R0_tos, laddress(n));
 930 #else
 931   __ ldr(R0_tos_lo, laddress(n));
 932   __ ldr(R1_tos_hi, haddress(n));
 933 #endif // AARCH64
 934 }
 935 
 936 
 937 void TemplateTable::fload(int n) {
 938   transition(vtos, ftos);
 939 #ifdef __SOFTFP__
 940   __ ldr(R0_tos, faddress(n));
 941 #else
 942   __ ldr_float(S0_tos, faddress(n));
 943 #endif // __SOFTFP__
 944 }
 945 
 946 
 947 void TemplateTable::dload(int n) {
 948   transition(vtos, dtos);
 949 #ifdef __SOFTFP__
 950   __ ldr(R0_tos_lo, laddress(n));
 951   __ ldr(R1_tos_hi, haddress(n));
 952 #else
 953   __ ldr_double(D0_tos, daddress(n));
 954 #endif // __SOFTFP__
 955 }
 956 
 957 
 958 void TemplateTable::aload(int n) {
 959   transition(vtos, atos);
 960   __ ldr(R0_tos, aaddress(n));
 961 }
 962 
 963 void TemplateTable::aload_0() {
 964   aload_0_internal();
 965 }
 966 
 967 void TemplateTable::nofast_aload_0() {
 968   aload_0_internal(may_not_rewrite);
 969 }
 970 
 971 void TemplateTable::aload_0_internal(RewriteControl rc) {
 972   transition(vtos, atos);
 973   // According to bytecode histograms, the pairs:
 974   //
 975   // _aload_0, _fast_igetfield
 976   // _aload_0, _fast_agetfield
 977   // _aload_0, _fast_fgetfield
 978   //
 979   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 980   // bytecode checks if the next bytecode is either _fast_igetfield,
 981   // _fast_agetfield or _fast_fgetfield and then rewrites the
 982   // current bytecode into a pair bytecode; otherwise it rewrites the current
 983   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 984   //
 985   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 986   //       otherwise we may miss an opportunity for a pair.
 987   //
 988   // Also rewrite frequent pairs
 989   //   aload_0, aload_1
 990   //   aload_0, iload_1
 991   // These bytecodes, which have only a small amount of code, are the most profitable to rewrite
 992   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 993     Label rewrite, done;
 994     const Register next_bytecode = R1_tmp;
 995     const Register target_bytecode = R2_tmp;
 996 
 997     // get next byte
 998     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 999 
1000     // if _getfield then wait with rewrite
1001     __ cmp(next_bytecode, Bytecodes::_getfield);
1002     __ b(done, eq);
1003 
1004     // if _igetfield then rewrite to _fast_iaccess_0
1005     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1006     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1007     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1008     __ b(rewrite, eq);
1009 
1010     // if _agetfield then rewrite to _fast_aaccess_0
1011     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1012     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1013     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1014     __ b(rewrite, eq);
1015 
1016     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload_0
1017     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1018     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1019 
1020     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1021 #ifdef AARCH64
1022     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1023     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1024     __ mov(target_bytecode, Rtemp, eq);
1025 #else
1026     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1027     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1028 #endif // AARCH64
1029 
1030     // rewrite
1031     __ bind(rewrite);
1032     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1033 
1034     __ bind(done);
1035   }
1036 
1037   aload(0);
1038 }
1039 
1040 void TemplateTable::istore() {
1041   transition(itos, vtos);
1042   const Register Rlocal_index = R2_tmp;
1043 
1044   locals_index(Rlocal_index);
1045   Address local = load_iaddress(Rlocal_index, Rtemp);
1046   __ str_32(R0_tos, local);
1047 }
1048 
1049 
1050 void TemplateTable::lstore() {
1051   transition(ltos, vtos);
1052   const Register Rlocal_index = R2_tmp;
1053 
1054   locals_index(Rlocal_index);
1055   store_category2_local(Rlocal_index, R3_tmp);
1056 }
1057 
1058 
1059 void TemplateTable::fstore() {
1060   transition(ftos, vtos);
1061   const Register Rlocal_index = R2_tmp;
1062 
1063   locals_index(Rlocal_index);
1064   Address local = load_faddress(Rlocal_index, Rtemp);
1065 #ifdef __SOFTFP__
1066   __ str(R0_tos, local);
1067 #else
1068   __ str_float(S0_tos, local);
1069 #endif // __SOFTFP__
1070 }
1071 
1072 
1073 void TemplateTable::dstore() {
1074   transition(dtos, vtos);
1075   const Register Rlocal_index = R2_tmp;
1076 
1077   locals_index(Rlocal_index);
1078 
1079 #ifdef __SOFTFP__
1080   store_category2_local(Rlocal_index, R3_tmp);
1081 #else
1082   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1083 #endif // __SOFTFP__
1084 }
1085 
1086 
1087 void TemplateTable::astore() {
1088   transition(vtos, vtos);
1089   const Register Rlocal_index = R1_tmp;
1090 
1091   __ pop_ptr(R0_tos);
1092   locals_index(Rlocal_index);
1093   Address local = load_aaddress(Rlocal_index, Rtemp);
1094   __ str(R0_tos, local);
1095 }
1096 
1097 
1098 void TemplateTable::wide_istore() {
1099   transition(vtos, vtos);
1100   const Register Rlocal_index = R2_tmp;
1101 
1102   __ pop_i(R0_tos);
1103   locals_index_wide(Rlocal_index);
1104   Address local = load_iaddress(Rlocal_index, Rtemp);
1105   __ str_32(R0_tos, local);
1106 }
1107 
1108 
1109 void TemplateTable::wide_lstore() {
1110   transition(vtos, vtos);
1111   const Register Rlocal_index = R2_tmp;
1112   const Register Rlocal_base = R3_tmp;
1113 
1114 #ifdef AARCH64
1115   __ pop_l(R0_tos);
1116 #else
1117   __ pop_l(R0_tos_lo, R1_tos_hi);
1118 #endif // AARCH64
1119 
1120   locals_index_wide(Rlocal_index);
1121   store_category2_local(Rlocal_index, R3_tmp);
1122 }
1123 
1124 
1125 void TemplateTable::wide_fstore() {
1126   wide_istore();
1127 }
1128 
1129 
1130 void TemplateTable::wide_dstore() {
1131   wide_lstore();
1132 }
1133 
1134 
1135 void TemplateTable::wide_astore() {
1136   transition(vtos, vtos);
1137   const Register Rlocal_index = R2_tmp;
1138 
1139   __ pop_ptr(R0_tos);
1140   locals_index_wide(Rlocal_index);
1141   Address local = load_aaddress(Rlocal_index, Rtemp);
1142   __ str(R0_tos, local);
1143 }
1144 
1145 
1146 void TemplateTable::iastore() {
1147   transition(itos, vtos);
1148   const Register Rindex = R4_tmp; // index_check prefers index in R4
1149   const Register Rarray = R3_tmp;
1150   // R0_tos: value
1151 
1152   __ pop_i(Rindex);
1153   index_check(Rarray, Rindex);
1154   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1155 }
1156 
1157 
1158 void TemplateTable::lastore() {
1159   transition(ltos, vtos);
1160   const Register Rindex = R4_tmp; // index_check prefers index in R4
1161   const Register Rarray = R3_tmp;
1162   // R0_tos_lo:R1_tos_hi: value
1163 
1164   __ pop_i(Rindex);
1165   index_check(Rarray, Rindex);
1166 
1167 #ifdef AARCH64
1168   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1169 #else
1170   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1171   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1172   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1173 #endif // AARCH64
1174 }
1175 
1176 
1177 void TemplateTable::fastore() {
1178   transition(ftos, vtos);
1179   const Register Rindex = R4_tmp; // index_check prefers index in R4
1180   const Register Rarray = R3_tmp;
1181   // S0_tos/R0_tos: value
1182 
1183   __ pop_i(Rindex);
1184   index_check(Rarray, Rindex);
1185   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1186 
1187 #ifdef __SOFTFP__
1188   __ str(R0_tos, addr);
1189 #else
1190   __ str_float(S0_tos, addr);
1191 #endif // __SOFTFP__
1192 }
1193 
1194 
1195 void TemplateTable::dastore() {
1196   transition(dtos, vtos);
1197   const Register Rindex = R4_tmp; // index_check prefers index in R4
1198   const Register Rarray = R3_tmp;
1199   // D0_tos / R0_tos_lo:R1_tos_hi: value
1200 
1201   __ pop_i(Rindex);
1202   index_check(Rarray, Rindex);
1203 
1204 #ifdef __SOFTFP__
1205   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1206   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1207   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1208 #else
1209   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1210 #endif // __SOFTFP__
1211 }
1212 
1213 
1214 void TemplateTable::aastore() {
1215   transition(vtos, vtos);
1216   Label is_null, throw_array_store, done;
1217 
1218   const Register Raddr_1   = R1_tmp;
1219   const Register Rvalue_2  = R2_tmp;
1220   const Register Rarray_3  = R3_tmp;
1221   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1222   const Register Rsub_5    = R5_tmp;
1223   const Register Rsuper_LR = LR_tmp;
1224 
1225   // stack: ..., array, index, value
1226   __ ldr(Rvalue_2, at_tos());     // Value
1227   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1228   __ ldr(Rarray_3, at_tos_p2());  // Array
1229 
1230   index_check_without_pop(Rarray_3, Rindex_4);
1231 
1232   // Compute the array base
1233   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1234 
1235   // do array store check - check for NULL value first
1236   __ cbz(Rvalue_2, is_null);
1237 
1238   // Load subklass
1239   __ load_klass(Rsub_5, Rvalue_2);
1240   // Load superklass
1241   __ load_klass(Rtemp, Rarray_3);
1242   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1243 
1244   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1245   // Come here on success
1246 
1247   // Store value
1248   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1249 
1250   // Now store using the appropriate barrier
1251   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false);
1252   __ b(done);
1253 
1254   __ bind(throw_array_store);
1255 
1256   // Come here on failure of subtype check
1257   __ profile_typecheck_failed(R0_tmp);
1258 
1259   // object is at TOS
1260   __ b(Interpreter::_throw_ArrayStoreException_entry);
1261 
1262   // Have a NULL in Rvalue_2, store NULL at array[index].
1263   __ bind(is_null);
1264   __ profile_null_seen(R0_tmp);
1265 
1266   // Store a NULL
1267   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true);
1268 
1269   // Pop stack arguments
1270   __ bind(done);
1271   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1272 }
1273 
1274 
1275 void TemplateTable::bastore() {
1276   transition(itos, vtos);
1277   const Register Rindex = R4_tmp; // index_check prefers index in R4
1278   const Register Rarray = R3_tmp;
1279   // R0_tos: value
1280 
1281   __ pop_i(Rindex);
1282   index_check(Rarray, Rindex);
1283 
1284   // Need to check whether array is boolean or byte
1285   // since both types share the bastore bytecode.
1286   __ load_klass(Rtemp, Rarray);
1287   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1288   Label L_skip;
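       // The layout helpers of T_BOOLEAN and T_BYTE arrays differ in exactly
       // one bit; testing that bit distinguishes the two array types.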
1289   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1290   __ b(L_skip, eq);
1291   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1292   __ bind(L_skip);
1293   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1294 }
1295 
1296 
1297 void TemplateTable::castore() {
1298   transition(itos, vtos);
1299   const Register Rindex = R4_tmp; // index_check prefers index in R4
1300   const Register Rarray = R3_tmp;
1301   // R0_tos: value
1302 
1303   __ pop_i(Rindex);
1304   index_check(Rarray, Rindex);
1305 
1306   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1307 }
1308 
1309 
1310 void TemplateTable::sastore() {
1311   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1312            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1313          "base offsets for char and short should be equal");
1314   castore();
1315 }
1316 
1317 
1318 void TemplateTable::istore(int n) {
1319   transition(itos, vtos);
1320   __ str_32(R0_tos, iaddress(n));
1321 }
1322 
1323 
1324 void TemplateTable::lstore(int n) {
1325   transition(ltos, vtos);
1326 #ifdef AARCH64
1327   __ str(R0_tos, laddress(n));
1328 #else
1329   __ str(R0_tos_lo, laddress(n));
1330   __ str(R1_tos_hi, haddress(n));
1331 #endif // AARCH64
1332 }
1333 
1334 
1335 void TemplateTable::fstore(int n) {
1336   transition(ftos, vtos);
1337 #ifdef __SOFTFP__
1338   __ str(R0_tos, faddress(n));
1339 #else
1340   __ str_float(S0_tos, faddress(n));
1341 #endif // __SOFTFP__
1342 }
1343 
1344 
1345 void TemplateTable::dstore(int n) {
1346   transition(dtos, vtos);
1347 #ifdef __SOFTFP__
1348   __ str(R0_tos_lo, laddress(n));
1349   __ str(R1_tos_hi, haddress(n));
1350 #else
1351   __ str_double(D0_tos, daddress(n));
1352 #endif // __SOFTFP__
1353 }
1354 
1355 
1356 void TemplateTable::astore(int n) {
1357   transition(vtos, vtos);
1358   __ pop_ptr(R0_tos);
1359   __ str(R0_tos, aaddress(n));
1360 }
1361 
1362 
1363 void TemplateTable::pop() {
1364   transition(vtos, vtos);
1365   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1366 }
1367 
1368 
1369 void TemplateTable::pop2() {
1370   transition(vtos, vtos);
1371   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1372 }
1373 
1374 
1375 void TemplateTable::dup() {
1376   transition(vtos, vtos);
1377   // stack: ..., a
1378   __ load_ptr(0, R0_tmp);
1379   __ push_ptr(R0_tmp);
1380   // stack: ..., a, a
1381 }
1382 
1383 
1384 void TemplateTable::dup_x1() {
1385   transition(vtos, vtos);
1386   // stack: ..., a, b
1387   __ load_ptr(0, R0_tmp);  // load b
1388   __ load_ptr(1, R2_tmp);  // load a
1389   __ store_ptr(1, R0_tmp); // store b
1390   __ store_ptr(0, R2_tmp); // store a
1391   __ push_ptr(R0_tmp);     // push b
1392   // stack: ..., b, a, b
1393 }
1394 
1395 
1396 void TemplateTable::dup_x2() {
1397   transition(vtos, vtos);
1398   // stack: ..., a, b, c
1399   __ load_ptr(0, R0_tmp);   // load c
1400   __ load_ptr(1, R2_tmp);   // load b
1401   __ load_ptr(2, R4_tmp);   // load a
1402 
1403   __ push_ptr(R0_tmp);      // push c
1404 
1405   // stack: ..., a, b, c, c
1406   __ store_ptr(1, R2_tmp);  // store b
1407   __ store_ptr(2, R4_tmp);  // store a
1408   __ store_ptr(3, R0_tmp);  // store c
1409   // stack: ..., c, a, b, c
1410 }
1411 
1412 
1413 void TemplateTable::dup2() {
1414   transition(vtos, vtos);
1415   // stack: ..., a, b
1416   __ load_ptr(1, R0_tmp);  // load a
1417   __ push_ptr(R0_tmp);     // push a
1418   __ load_ptr(1, R0_tmp);  // load b
1419   __ push_ptr(R0_tmp);     // push b
1420   // stack: ..., a, b, a, b
1421 }
1422 
1423 
1424 void TemplateTable::dup2_x1() {
1425   transition(vtos, vtos);
1426 
1427   // stack: ..., a, b, c
1428   __ load_ptr(0, R4_tmp);  // load c
1429   __ load_ptr(1, R2_tmp);  // load b
1430   __ load_ptr(2, R0_tmp);  // load a
1431 
1432   __ push_ptr(R2_tmp);     // push b
1433   __ push_ptr(R4_tmp);     // push c
1434 
1435   // stack: ..., a, b, c, b, c
1436 
1437   __ store_ptr(2, R0_tmp);  // store a
1438   __ store_ptr(3, R4_tmp);  // store c
1439   __ store_ptr(4, R2_tmp);  // store b
1440 
1441   // stack: ..., b, c, a, b, c
1442 }
1443 
1444 
1445 void TemplateTable::dup2_x2() {
1446   transition(vtos, vtos);
1447   // stack: ..., a, b, c, d
1448   __ load_ptr(0, R0_tmp);  // load d
1449   __ load_ptr(1, R2_tmp);  // load c
1450   __ push_ptr(R2_tmp);     // push c
1451   __ push_ptr(R0_tmp);     // push d
1452   // stack: ..., a, b, c, d, c, d
1453   __ load_ptr(4, R4_tmp);  // load b
1454   __ store_ptr(4, R0_tmp); // store d in b
1455   __ store_ptr(2, R4_tmp); // store b in d
1456   // stack: ..., a, d, c, b, c, d
1457   __ load_ptr(5, R4_tmp);  // load a
1458   __ store_ptr(5, R2_tmp); // store c in a
1459   __ store_ptr(3, R4_tmp); // store a in c
1460   // stack: ..., c, d, a, b, c, d
1461 }
1462 
1463 
1464 void TemplateTable::swap() {
1465   transition(vtos, vtos);
1466   // stack: ..., a, b
1467   __ load_ptr(1, R0_tmp);  // load a
1468   __ load_ptr(0, R2_tmp);  // load b
1469   __ store_ptr(0, R0_tmp); // store a in b
1470   __ store_ptr(1, R2_tmp); // store b in a
1471   // stack: ..., b, a
1472 }
1473 
1474 
1475 void TemplateTable::iop2(Operation op) {
1476   transition(itos, itos);
1477   const Register arg1 = R1_tmp;
1478   const Register arg2 = R0_tos;
1479 
1480   __ pop_i(arg1);
1481   switch (op) {
1482     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1483     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1484     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1485     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1486     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1487     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1488 #ifdef AARCH64
1489     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1490     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1491     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1492 #else
1493     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1494     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1495     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1496 #endif // AARCH64
1497     default   : ShouldNotReachHere();
1498   }
1499 }
1500 
1501 
1502 void TemplateTable::lop2(Operation op) {
1503   transition(ltos, ltos);
1504 #ifdef AARCH64
1505   const Register arg1 = R1_tmp;
1506   const Register arg2 = R0_tos;
1507 
1508   __ pop_l(arg1);
1509   switch (op) {
1510     case add  : __ add (R0_tos, arg1, arg2); break;
1511     case sub  : __ sub (R0_tos, arg1, arg2); break;
1512     case _and : __ andr(R0_tos, arg1, arg2); break;
1513     case _or  : __ orr (R0_tos, arg1, arg2); break;
1514     case _xor : __ eor (R0_tos, arg1, arg2); break;
1515     default   : ShouldNotReachHere();
1516   }
1517 #else
1518   const Register arg1_lo = R2_tmp;
1519   const Register arg1_hi = R3_tmp;
1520   const Register arg2_lo = R0_tos_lo;
1521   const Register arg2_hi = R1_tos_hi;
1522 
1523   __ pop_l(arg1_lo, arg1_hi);
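       // 64-bit add/sub are composed from 32-bit halves: adds/adc and subs/sbc
       // propagate the carry/borrow from the low word to the high word.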
1524   switch (op) {
1525     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1526     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1527     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1528     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1529     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1530     default : ShouldNotReachHere();
1531   }
1532 #endif // AARCH64
1533 }
1534 
1535 
1536 void TemplateTable::idiv() {
1537   transition(itos, itos);
1538 #ifdef AARCH64
1539   const Register divisor = R0_tos;
1540   const Register dividend = R1_tmp;
1541 
1542   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1543   __ pop_i(dividend);
1544   __ sdiv_w(R0_tos, dividend, divisor);
1545 #else
1546   __ mov(R2, R0_tos);
1547   __ pop_i(R0);
1548   // R0 - dividend
1549   // R2 - divisor
1550   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1551   // R1 - result
1552   __ mov(R0_tos, R1);
1553 #endif // AARCH64
1554 }
1555 
1556 
1557 void TemplateTable::irem() {
1558   transition(itos, itos);
1559 #ifdef AARCH64
1560   const Register divisor = R0_tos;
1561   const Register dividend = R1_tmp;
1562   const Register quotient = R2_tmp;
1563 
1564   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1565   __ pop_i(dividend);
1566   __ sdiv_w(quotient, dividend, divisor);
1567   __ msub_w(R0_tos, divisor, quotient, dividend);
1568 #else
1569   __ mov(R2, R0_tos);
1570   __ pop_i(R0);
1571   // R0 - dividend
1572   // R2 - divisor
1573   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1574   // R0 - remainder
1575 #endif // AARCH64
1576 }
1577 
1578 
1579 void TemplateTable::lmul() {
1580   transition(ltos, ltos);
1581 #ifdef AARCH64
1582   const Register arg1 = R0_tos;
1583   const Register arg2 = R1_tmp;
1584 
1585   __ pop_l(arg2);
1586   __ mul(R0_tos, arg1, arg2);
1587 #else
1588   const Register arg1_lo = R0_tos_lo;
1589   const Register arg1_hi = R1_tos_hi;
1590   const Register arg2_lo = R2_tmp;
1591   const Register arg2_hi = R3_tmp;
1592 
1593   __ pop_l(arg2_lo, arg2_hi);
1594 
1595   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1596 #endif // AARCH64
1597 }
1598 
1599 
1600 void TemplateTable::ldiv() {
1601   transition(ltos, ltos);
1602 #ifdef AARCH64
1603   const Register divisor = R0_tos;
1604   const Register dividend = R1_tmp;
1605 
1606   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1607   __ pop_l(dividend);
1608   __ sdiv(R0_tos, dividend, divisor);
1609 #else
1610   const Register x_lo = R2_tmp;
1611   const Register x_hi = R3_tmp;
1612   const Register y_lo = R0_tos_lo;
1613   const Register y_hi = R1_tos_hi;
1614 
1615   __ pop_l(x_lo, x_hi);
1616 
1617   // check if y = 0
1618   __ orrs(Rtemp, y_lo, y_hi);
1619   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1620   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1621 #endif // AARCH64
1622 }
1623 
1624 
1625 void TemplateTable::lrem() {
1626   transition(ltos, ltos);
1627 #ifdef AARCH64
1628   const Register divisor = R0_tos;
1629   const Register dividend = R1_tmp;
1630   const Register quotient = R2_tmp;
1631 
1632   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1633   __ pop_l(dividend);
1634   __ sdiv(quotient, dividend, divisor);
1635   __ msub(R0_tos, divisor, quotient, dividend);
1636 #else
1637   const Register x_lo = R2_tmp;
1638   const Register x_hi = R3_tmp;
1639   const Register y_lo = R0_tos_lo;
1640   const Register y_hi = R1_tos_hi;
1641 
1642   __ pop_l(x_lo, x_hi);
1643 
1644   // check if y = 0
1645   __ orrs(Rtemp, y_lo, y_hi);
1646   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1647   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1648 #endif // AARCH64
1649 }
1650 
1651 
1652 void TemplateTable::lshl() {
1653   transition(itos, ltos);
1654 #ifdef AARCH64
1655   const Register val = R1_tmp;
1656   const Register shift_cnt = R0_tos;
1657   __ pop_l(val);
1658   __ lslv(R0_tos, val, shift_cnt);
1659 #else
1660   const Register shift_cnt = R4_tmp;
1661   const Register val_lo = R2_tmp;
1662   const Register val_hi = R3_tmp;
1663 
1664   __ pop_l(val_lo, val_hi);
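       // Per the JVM spec, only the low 6 bits of the shift count are used;
       // long_shift composes the 64-bit shift from the two 32-bit halves.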
1665   __ andr(shift_cnt, R0_tos, 63);
1666   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1667 #endif // AARCH64
1668 }
1669 
1670 
1671 void TemplateTable::lshr() {
1672   transition(itos, ltos);
1673 #ifdef AARCH64
1674   const Register val = R1_tmp;
1675   const Register shift_cnt = R0_tos;
1676   __ pop_l(val);
1677   __ asrv(R0_tos, val, shift_cnt);
1678 #else
1679   const Register shift_cnt = R4_tmp;
1680   const Register val_lo = R2_tmp;
1681   const Register val_hi = R3_tmp;
1682 
1683   __ pop_l(val_lo, val_hi);
1684   __ andr(shift_cnt, R0_tos, 63);
1685   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1686 #endif // AARCH64
1687 }
1688 
1689 
1690 void TemplateTable::lushr() {
1691   transition(itos, ltos);
1692 #ifdef AARCH64
1693   const Register val = R1_tmp;
1694   const Register shift_cnt = R0_tos;
1695   __ pop_l(val);
1696   __ lsrv(R0_tos, val, shift_cnt);
1697 #else
1698   const Register shift_cnt = R4_tmp;
1699   const Register val_lo = R2_tmp;
1700   const Register val_hi = R3_tmp;
1701 
1702   __ pop_l(val_lo, val_hi);
1703   __ andr(shift_cnt, R0_tos, 63);
1704   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1705 #endif // AARCH64
1706 }
1707 
1708 
1709 void TemplateTable::fop2(Operation op) {
1710   transition(ftos, ftos);
1711 #ifdef __SOFTFP__
1712   __ mov(R1, R0_tos);
1713   __ pop_i(R0);
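       // The _glibc-suffixed __aeabi entries are soft-fp routines imported from
       // glibc for better accuracy (see the corresponding note in dop2, CR 6757269).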
1714   switch (op) {
1715     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1716     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1717     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1718     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1719     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1720     default : ShouldNotReachHere();
1721   }
1722 #else
1723   const FloatRegister arg1 = S1_tmp;
1724   const FloatRegister arg2 = S0_tos;
1725 
1726   switch (op) {
1727     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1728     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1729     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1730     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1731     case rem:
1732 #ifndef __ABI_HARD__
1733       __ pop_f(arg1);
1734       __ fmrs(R0, arg1);
1735       __ fmrs(R1, arg2);
1736       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1737       __ fmsr(S0_tos, R0);
1738 #else
1739       __ mov_float(S1_reg, arg2);
1740       __ pop_f(S0);
1741       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1742 #endif // !__ABI_HARD__
1743       break;
1744     default : ShouldNotReachHere();
1745   }
1746 #endif // __SOFTFP__
1747 }
1748 
1749 
1750 void TemplateTable::dop2(Operation op) {
1751   transition(dtos, dtos);
1752 #ifdef __SOFTFP__
1753   __ mov(R2, R0_tos_lo);
1754   __ mov(R3, R1_tos_hi);
1755   __ pop_l(R0, R1);
1756   switch (op) {
1757     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1758     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1759     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1760     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1761     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1762     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1763     default : ShouldNotReachHere();
1764   }
1765 #else
1766   const FloatRegister arg1 = D1_tmp;
1767   const FloatRegister arg2 = D0_tos;
1768 
1769   switch (op) {
1770     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1771     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1772     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1773     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1774     case rem:
1775 #ifndef __ABI_HARD__
1776       __ pop_d(arg1);
1777       __ fmrrd(R0, R1, arg1);
1778       __ fmrrd(R2, R3, arg2);
1779       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1780       __ fmdrr(D0_tos, R0, R1);
1781 #else
1782       __ mov_double(D1, arg2);
1783       __ pop_d(D0);
1784       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1785 #endif // !__ABI_HARD__
1786       break;
1787     default : ShouldNotReachHere();
1788   }
1789 #endif // __SOFTFP__
1790 }
1791 
1792 
1793 void TemplateTable::ineg() {
1794   transition(itos, itos);
1795   __ neg_32(R0_tos, R0_tos);
1796 }
1797 
1798 
1799 void TemplateTable::lneg() {
1800   transition(ltos, ltos);
1801 #ifdef AARCH64
1802   __ neg(R0_tos, R0_tos);
1803 #else
1804   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1805   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1806 #endif // AARCH64
1807 }
1808 
1809 
1810 void TemplateTable::fneg() {
1811   transition(ftos, ftos);
1812 #ifdef __SOFTFP__
1813   // Invert sign bit
1814   const int sign_mask = 0x80000000;
1815   __ eor(R0_tos, R0_tos, sign_mask);
1816 #else
1817   __ neg_float(S0_tos, S0_tos);
1818 #endif // __SOFTFP__
1819 }
1820 
1821 
1822 void TemplateTable::dneg() {
1823   transition(dtos, dtos);
1824 #ifdef __SOFTFP__
1825   // Invert sign bit in the high part of the double
1826   const int sign_mask_hi = 0x80000000;
1827   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1828 #else
1829   __ neg_double(D0_tos, D0_tos);
1830 #endif // __SOFTFP__
1831 }
1832 
1833 
1834 void TemplateTable::iinc() {
1835   transition(vtos, vtos);
1836   const Register Rconst = R2_tmp;
1837   const Register Rlocal_index = R1_tmp;
1838   const Register Rval = R0_tmp;
1839 
1840   __ ldrsb(Rconst, at_bcp(2));
1841   locals_index(Rlocal_index);
1842   Address local = load_iaddress(Rlocal_index, Rtemp);
1843   __ ldr_s32(Rval, local);
1844   __ add(Rval, Rval, Rconst);
1845   __ str_32(Rval, local);
1846 }
1847 
1848 
1849 void TemplateTable::wide_iinc() {
1850   transition(vtos, vtos);
1851   const Register Rconst = R2_tmp;
1852   const Register Rlocal_index = R1_tmp;
1853   const Register Rval = R0_tmp;
1854 
1855   // get constant in Rconst
1856   __ ldrsb(R2_tmp, at_bcp(4));
1857   __ ldrb(R3_tmp, at_bcp(5));
1858   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
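  // Illustrative sketch only (not generated code): the three instructions above
  // assemble the signed 16-bit increment from the two big-endian operand bytes,
  // roughly equivalent to:
  //   int32_t increment = (int16_t)((bcp[4] << 8) | bcp[5]);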
1859 
1860   locals_index_wide(Rlocal_index);
1861   Address local = load_iaddress(Rlocal_index, Rtemp);
1862   __ ldr_s32(Rval, local);
1863   __ add(Rval, Rval, Rconst);
1864   __ str_32(Rval, local);
1865 }
1866 
1867 
1868 void TemplateTable::convert() {
1869   // Checking
1870 #ifdef ASSERT
1871   { TosState tos_in  = ilgl;
1872     TosState tos_out = ilgl;
1873     switch (bytecode()) {
1874       case Bytecodes::_i2l: // fall through
1875       case Bytecodes::_i2f: // fall through
1876       case Bytecodes::_i2d: // fall through
1877       case Bytecodes::_i2b: // fall through
1878       case Bytecodes::_i2c: // fall through
1879       case Bytecodes::_i2s: tos_in = itos; break;
1880       case Bytecodes::_l2i: // fall through
1881       case Bytecodes::_l2f: // fall through
1882       case Bytecodes::_l2d: tos_in = ltos; break;
1883       case Bytecodes::_f2i: // fall through
1884       case Bytecodes::_f2l: // fall through
1885       case Bytecodes::_f2d: tos_in = ftos; break;
1886       case Bytecodes::_d2i: // fall through
1887       case Bytecodes::_d2l: // fall through
1888       case Bytecodes::_d2f: tos_in = dtos; break;
1889       default             : ShouldNotReachHere();
1890     }
1891     switch (bytecode()) {
1892       case Bytecodes::_l2i: // fall through
1893       case Bytecodes::_f2i: // fall through
1894       case Bytecodes::_d2i: // fall through
1895       case Bytecodes::_i2b: // fall through
1896       case Bytecodes::_i2c: // fall through
1897       case Bytecodes::_i2s: tos_out = itos; break;
1898       case Bytecodes::_i2l: // fall through
1899       case Bytecodes::_f2l: // fall through
1900       case Bytecodes::_d2l: tos_out = ltos; break;
1901       case Bytecodes::_i2f: // fall through
1902       case Bytecodes::_l2f: // fall through
1903       case Bytecodes::_d2f: tos_out = ftos; break;
1904       case Bytecodes::_i2d: // fall through
1905       case Bytecodes::_l2d: // fall through
1906       case Bytecodes::_f2d: tos_out = dtos; break;
1907       default             : ShouldNotReachHere();
1908     }
1909     transition(tos_in, tos_out);
1910   }
1911 #endif // ASSERT
1912 
1913   // Conversion
1914   switch (bytecode()) {
1915     case Bytecodes::_i2l:
1916 #ifdef AARCH64
1917       __ sign_extend(R0_tos, R0_tos, 32);
1918 #else
1919       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1920 #endif // AARCH64
1921       break;
1922 
1923     case Bytecodes::_i2f:
1924 #ifdef AARCH64
1925       __ scvtf_sw(S0_tos, R0_tos);
1926 #else
1927 #ifdef __SOFTFP__
1928       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1929 #else
1930       __ fmsr(S0_tmp, R0_tos);
1931       __ fsitos(S0_tos, S0_tmp);
1932 #endif // __SOFTFP__
1933 #endif // AARCH64
1934       break;
1935 
1936     case Bytecodes::_i2d:
1937 #ifdef AARCH64
1938       __ scvtf_dw(D0_tos, R0_tos);
1939 #else
1940 #ifdef __SOFTFP__
1941       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1942 #else
1943       __ fmsr(S0_tmp, R0_tos);
1944       __ fsitod(D0_tos, S0_tmp);
1945 #endif // __SOFTFP__
1946 #endif // AARCH64
1947       break;
1948 
1949     case Bytecodes::_i2b:
1950       __ sign_extend(R0_tos, R0_tos, 8);
1951       break;
1952 
1953     case Bytecodes::_i2c:
1954       __ zero_extend(R0_tos, R0_tos, 16);
1955       break;
1956 
1957     case Bytecodes::_i2s:
1958       __ sign_extend(R0_tos, R0_tos, 16);
1959       break;
1960 
1961     case Bytecodes::_l2i:
1962       /* nothing to do */
1963       break;
1964 
1965     case Bytecodes::_l2f:
1966 #ifdef AARCH64
1967       __ scvtf_sx(S0_tos, R0_tos);
1968 #else
1969       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1970 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1971       __ fmsr(S0_tos, R0);
1972 #endif // !__SOFTFP__ && !__ABI_HARD__
1973 #endif // AARCH64
1974       break;
1975 
1976     case Bytecodes::_l2d:
1977 #ifdef AARCH64
1978       __ scvtf_dx(D0_tos, R0_tos);
1979 #else
1980       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1981 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1982       __ fmdrr(D0_tos, R0, R1);
1983 #endif // !__SOFTFP__ && !__ABI_HARD__
1984 #endif // AARCH64
1985       break;
1986 
1987     case Bytecodes::_f2i:
1988 #ifdef AARCH64
1989       __ fcvtzs_ws(R0_tos, S0_tos);
1990 #else
1991 #ifndef __SOFTFP__
1992       __ ftosizs(S0_tos, S0_tos);
1993       __ fmrs(R0_tos, S0_tos);
1994 #else
1995       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1996 #endif // !__SOFTFP__
1997 #endif // AARCH64
1998       break;
1999 
2000     case Bytecodes::_f2l:
2001 #ifdef AARCH64
2002       __ fcvtzs_xs(R0_tos, S0_tos);
2003 #else
2004 #ifndef __SOFTFP__
2005       __ fmrs(R0_tos, S0_tos);
2006 #endif // !__SOFTFP__
2007       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2008 #endif // AARCH64
2009       break;
2010 
2011     case Bytecodes::_f2d:
2012 #ifdef __SOFTFP__
2013       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2014 #else
2015       __ convert_f2d(D0_tos, S0_tos);
2016 #endif // __SOFTFP__
2017       break;
2018 
2019     case Bytecodes::_d2i:
2020 #ifdef AARCH64
2021       __ fcvtzs_wd(R0_tos, D0_tos);
2022 #else
2023 #ifndef __SOFTFP__
2024       __ ftosizd(Stemp, D0);
2025       __ fmrs(R0, Stemp);
2026 #else
2027       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2028 #endif // !__SOFTFP__
2029 #endif // AARCH64
2030       break;
2031 
2032     case Bytecodes::_d2l:
2033 #ifdef AARCH64
2034       __ fcvtzs_xd(R0_tos, D0_tos);
2035 #else
2036 #ifndef __SOFTFP__
2037       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2038 #endif // !__SOFTFP__
2039       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2040 #endif // AARCH64
2041       break;
2042 
2043     case Bytecodes::_d2f:
2044 #ifdef __SOFTFP__
2045       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2046 #else
2047       __ convert_d2f(S0_tos, D0_tos);
2048 #endif // __SOFTFP__
2049       break;
2050 
2051     default:
2052       ShouldNotReachHere();
2053   }
2054 }
2055 
2056 
2057 void TemplateTable::lcmp() {
2058   transition(ltos, itos);
2059 #ifdef AARCH64
2060   const Register arg1 = R1_tmp;
2061   const Register arg2 = R0_tos;
2062 
2063   __ pop_l(arg1);
2064 
2065   __ cmp(arg1, arg2);
2066   __ cset(R0_tos, gt);               // 1 if '>', else 0
2067   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2068 #else
2069   const Register arg1_lo = R2_tmp;
2070   const Register arg1_hi = R3_tmp;
2071   const Register arg2_lo = R0_tos_lo;
2072   const Register arg2_hi = R1_tos_hi;
2073   const Register res = R4_tmp;
2074 
2075   __ pop_l(arg1_lo, arg1_hi);
2076 
2077   // long compare arg1 with arg2
2078   // result is -1/0/+1 if '<'/'='/'>'
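  // Rough C sketch of the sequence below (illustration only):
  //   if (x_hi != y_hi) return (x_hi < y_hi) ? -1 : 1;  // signed compare of high words
  //   if (x_lo != y_lo) return (x_lo < y_lo) ? -1 : 1;  // unsigned compare of low words
  //   return 0;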
2079   Label done;
2080 
2081   __ mov (res, 0);
2082   __ cmp (arg1_hi, arg2_hi);
2083   __ mvn (res, 0, lt);
2084   __ mov (res, 1, gt);
2085   __ b(done, ne);
2086   __ cmp (arg1_lo, arg2_lo);
2087   __ mvn (res, 0, lo);
2088   __ mov (res, 1, hi);
2089   __ bind(done);
2090   __ mov (R0_tos, res);
2091 #endif // AARCH64
2092 }
2093 
2094 
2095 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2096   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2097 
2098 #ifdef AARCH64
2099   if (is_float) {
2100     transition(ftos, itos);
2101     __ pop_f(S1_tmp);
2102     __ fcmp_s(S1_tmp, S0_tos);
2103   } else {
2104     transition(dtos, itos);
2105     __ pop_d(D1_tmp);
2106     __ fcmp_d(D1_tmp, D0_tos);
2107   }
2108 
2109   if (unordered_result < 0) {
2110     __ cset(R0_tos, gt);               // 1 if '>', else 0
2111     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2112   } else {
2113     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2114     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2115   }
2116 
2117 #else
2118 
2119 #ifdef __SOFTFP__
2120 
2121   if (is_float) {
2122     transition(ftos, itos);
2123     const Register Rx = R0;
2124     const Register Ry = R1;
2125 
2126     __ mov(Ry, R0_tos);
2127     __ pop_i(Rx);
2128 
2129     if (unordered_result == 1) {
2130       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2131     } else {
2132       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2133     }
2134 
2135   } else {
2136 
2137     transition(dtos, itos);
2138     const Register Rx_lo = R0;
2139     const Register Rx_hi = R1;
2140     const Register Ry_lo = R2;
2141     const Register Ry_hi = R3;
2142 
2143     __ mov(Ry_lo, R0_tos_lo);
2144     __ mov(Ry_hi, R1_tos_hi);
2145     __ pop_l(Rx_lo, Rx_hi);
2146 
2147     if (unordered_result == 1) {
2148       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2149     } else {
2150       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2151     }
2152   }
2153 
2154 #else
2155 
2156   if (is_float) {
2157     transition(ftos, itos);
2158     __ pop_f(S1_tmp);
2159     __ fcmps(S1_tmp, S0_tos);
2160   } else {
2161     transition(dtos, itos);
2162     __ pop_d(D1_tmp);
2163     __ fcmpd(D1_tmp, D0_tos);
2164   }
2165 
2166   __ fmstat();
2167 
2168   // comparison result | flag N | flag Z | flag C | flag V
2169   // "<"               |   1    |   0    |   0    |   0
2170   // "=="              |   0    |   1    |   1    |   0
2171   // ">"               |   0    |   0    |   1    |   0
2172   // unordered         |   0    |   0    |   1    |   1
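  // Java semantics being implemented here, as a rough sketch (illustration only),
  // with 'a' the popped value and 'b' the top-of-stack value:
  //   if (a < b)  return -1;
  //   if (a > b)  return  1;
  //   if (a == b) return  0;
  //   return unordered_result;   // at least one operand is NaN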
2173 
2174   if (unordered_result < 0) {
2175     __ mov(R0_tos, 1);           // result ==  1 if greater
2176     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2177   } else {
2178     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2179     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2180   }
2181   __ mov(R0_tos, 0, eq);         // result ==  0 if equal (Z=1)
2182 #endif // __SOFTFP__
2183 #endif // AARCH64
2184 }
2185 
2186 
2187 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2188 
2189   const Register Rdisp = R0_tmp;
2190   const Register Rbumped_taken_count = R5_tmp;
2191 
2192   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2193 
2194   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2195                              InvocationCounter::counter_offset();
2196   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2197                               InvocationCounter::counter_offset();
2198   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2199 
2200   // Load up R0 with the branch displacement
2201   if (is_wide) {
2202     __ ldrsb(R0_tmp, at_bcp(1));
2203     __ ldrb(R1_tmp, at_bcp(2));
2204     __ ldrb(R2_tmp, at_bcp(3));
2205     __ ldrb(R3_tmp, at_bcp(4));
2206     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2207     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2208     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2209   } else {
2210     __ ldrsb(R0_tmp, at_bcp(1));
2211     __ ldrb(R1_tmp, at_bcp(2));
2212     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2213   }
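  // Illustrative sketch only (not generated code): Rdisp now holds the
  // sign-extended big-endian branch offset taken from the bytecode stream:
  //   wide:   disp = (int8_t)bcp[1] << 24 | bcp[2] << 16 | bcp[3] << 8 | bcp[4];
  //   narrow: disp = (int16_t)(bcp[1] << 8 | bcp[2]);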
2214 
2215   // Handle all the JSR stuff here, then exit.
2216   // It's much shorter and cleaner than intermingling with the
2217   // non-JSR normal-branch stuff occurring below.
2218   if (is_jsr) {
2219     // compute return address as bci in R1
2220     const Register Rret_addr = R1_tmp;
2221     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2222 
2223     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2224     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2225     __ sub(Rret_addr, Rret_addr, Rtemp);
2226 
2227     // Load the next target bytecode into R3_bytecode and advance Rbcp
2228 #ifdef AARCH64
2229     __ add(Rbcp, Rbcp, Rdisp);
2230     __ ldrb(R3_bytecode, Address(Rbcp));
2231 #else
2232     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2233 #endif // AARCH64
2234 
2235     // Push return address
2236     __ push_i(Rret_addr);
2237     // jsr returns vtos
2238     __ dispatch_only_noverify(vtos);
2239     return;
2240   }
2241 
2242   // Normal (non-jsr) branch handling
2243 
2244   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2245 #ifdef AARCH64
2246   __ add(Rbcp, Rbcp, Rdisp);
2247   __ ldrb(R3_bytecode, Address(Rbcp));
2248 #else
2249   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2250 #endif // AARCH64
2251 
2252   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2253   Label backedge_counter_overflow;
2254   Label profile_method;
2255   Label dispatch;
2256 
2257   if (UseLoopCounter) {
2258     // increment backedge counter for backward branches
2259     // Rdisp (R0): target offset
2260 
2261     const Register Rcnt = R2_tmp;
2262     const Register Rcounters = R1_tmp;
2263 
2264     // count only if backward branch
2265 #ifdef AARCH64
2266     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2267 #else
2268     __ tst(Rdisp, Rdisp);
2269     __ b(dispatch, pl);
2270 #endif // AARCH64
2271 
2272     if (TieredCompilation) {
2273       Label no_mdo;
2274       int increment = InvocationCounter::count_increment;
2275       if (ProfileInterpreter) {
2276         // Are we profiling?
2277         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2278         __ cbz(Rtemp, no_mdo);
2279         // Increment the MDO backedge counter
2280         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2281                                                   in_bytes(InvocationCounter::counter_offset()));
2282         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2283         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2284                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2285         __ b(dispatch);
2286       }
2287       __ bind(no_mdo);
2288       // Increment backedge counter in MethodCounters*
2289       // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on AArch64
2290       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2291                              Rdisp, R3_bytecode,
2292                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2293       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2294       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2295                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2296     } else {
2297       // Increment backedge counter in MethodCounters*
2298       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2299                              Rdisp, R3_bytecode,
2300                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2301       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2302       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2303       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2304 
2305       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2306 #ifdef AARCH64
2307       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2308 #else
2309       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2310 #endif // AARCH64
2311       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2312 
2313       if (ProfileInterpreter) {
2314         // Test to see if we should create a method data oop
2315         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2316         __ ldr_s32(Rtemp, profile_limit);
2317         __ cmp_32(Rcnt, Rtemp);
2318         __ b(dispatch, lt);
2319 
2320         // if no method data exists, go to profile method
2321         __ test_method_data_pointer(R4_tmp, profile_method);
2322 
2323         if (UseOnStackReplacement) {
2324           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2325           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2326           __ ldr_s32(Rtemp, backward_branch_limit);
2327           __ cmp(Rbumped_taken_count, Rtemp);
2328           __ b(dispatch, lo);
2329 
2330           // When ProfileInterpreter is on, the backedge_count comes from the
2331           // MethodData*, whose value does not get reset on the call to
2332           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2333           // routine while the method is being compiled, add a second test to make
2334           // sure the overflow function is called only once every overflow_frequency.
2335           const int overflow_frequency = 1024;
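          // The check below is equivalent to (illustration only):
          //   if ((bumped_taken_count & (overflow_frequency - 1)) == 0) goto backedge_counter_overflow;
          // AArch64 uses a direct 'tst'; 32-bit ARM shifts the low 10 bits into the
          // top of the register so that the Z flag reflects the same test.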
2336 
2337 #ifdef AARCH64
2338           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2339 #else
2340           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2341           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2342           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2343 #endif // AARCH64
2344 
2345           __ b(backedge_counter_overflow, eq);
2346         }
2347       } else {
2348         if (UseOnStackReplacement) {
2349           // check for overflow against Rcnt, which is the sum of the counters
2350           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2351           __ ldr_s32(Rtemp, backward_branch_limit);
2352           __ cmp_32(Rcnt, Rtemp);
2353           __ b(backedge_counter_overflow, hs);
2354 
2355         }
2356       }
2357     }
2358     __ bind(dispatch);
2359   }
2360 
2361   if (!UseOnStackReplacement) {
2362     __ bind(backedge_counter_overflow);
2363   }
2364 
2365   // continue with the bytecode @ target
2366   __ dispatch_only(vtos);
2367 
2368   if (UseLoopCounter) {
2369     if (ProfileInterpreter) {
2370       // Out-of-line code to allocate method data oop.
2371       __ bind(profile_method);
2372 
2373       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2374       __ set_method_data_pointer_for_bcp();
2375       // reload next bytecode
2376       __ ldrb(R3_bytecode, Address(Rbcp));
2377       __ b(dispatch);
2378     }
2379 
2380     if (UseOnStackReplacement) {
2381       // invocation counter overflow
2382       __ bind(backedge_counter_overflow);
2383 
2384       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2385       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2386 
2387       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2388       const Register Rnmethod = R0;
2389 
2390       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2391 
2392       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2393 
2394       // nmethod may have been invalidated (VM may block upon call_VM return)
2395       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2396       __ cmp(R1_tmp, nmethod::in_use);
2397       __ b(dispatch, ne);
2398 
2399       // We have the address of an on-stack replacement routine in Rnmethod.
2400       // We need to prepare to execute the OSR method. First we must
2401       // migrate the locals and monitors off of the stack.
2402 
2403       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2404 
2405       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2406 
2407       // R0 is OSR buffer
2408 
2409       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2410       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2411 
2412 #ifdef AARCH64
2413       __ ldp(FP, LR, Address(FP));
2414       __ mov(SP, Rtemp);
2415 #else
2416       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2417       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2418 #endif // AARCH64
2419 
2420       __ jump(R1_tmp);
2421     }
2422   }
2423 }
2424 
2425 
2426 void TemplateTable::if_0cmp(Condition cc) {
2427   transition(itos, vtos);
2428   // assume branch is more often taken than not (loops use backward branches)
2429   Label not_taken;
2430 #ifdef AARCH64
2431   if (cc == equal) {
2432     __ cbnz_w(R0_tos, not_taken);
2433   } else if (cc == not_equal) {
2434     __ cbz_w(R0_tos, not_taken);
2435   } else {
2436     __ cmp_32(R0_tos, 0);
2437     __ b(not_taken, convNegCond(cc));
2438   }
2439 #else
2440   __ cmp_32(R0_tos, 0);
2441   __ b(not_taken, convNegCond(cc));
2442 #endif // AARCH64
2443   branch(false, false);
2444   __ bind(not_taken);
2445   __ profile_not_taken_branch(R0_tmp);
2446 }
2447 
2448 
2449 void TemplateTable::if_icmp(Condition cc) {
2450   transition(itos, vtos);
2451   // assume branch is more often taken than not (loops use backward branches)
2452   Label not_taken;
2453   __ pop_i(R1_tmp);
2454   __ cmp_32(R1_tmp, R0_tos);
2455   __ b(not_taken, convNegCond(cc));
2456   branch(false, false);
2457   __ bind(not_taken);
2458   __ profile_not_taken_branch(R0_tmp);
2459 }
2460 
2461 
2462 void TemplateTable::if_nullcmp(Condition cc) {
2463   transition(atos, vtos);
2464   assert(cc == equal || cc == not_equal, "invalid condition");
2465 
2466   // assume branch is more often taken than not (loops use backward branches)
2467   Label not_taken;
2468   if (cc == equal) {
2469     __ cbnz(R0_tos, not_taken);
2470   } else {
2471     __ cbz(R0_tos, not_taken);
2472   }
2473   branch(false, false);
2474   __ bind(not_taken);
2475   __ profile_not_taken_branch(R0_tmp);
2476 }
2477 
2478 
2479 void TemplateTable::if_acmp(Condition cc) {
2480   transition(atos, vtos);
2481   // assume branch is more often taken than not (loops use backward branches)
2482   Label not_taken;
2483   __ pop_ptr(R1_tmp);
2484   __ cmp(R1_tmp, R0_tos);
2485   __ b(not_taken, convNegCond(cc));
2486   branch(false, false);
2487   __ bind(not_taken);
2488   __ profile_not_taken_branch(R0_tmp);
2489 }
2490 
2491 
2492 void TemplateTable::ret() {
2493   transition(vtos, vtos);
2494   const Register Rlocal_index = R1_tmp;
2495   const Register Rret_bci = Rtmp_save0; // R4/R19
2496 
2497   locals_index(Rlocal_index);
2498   Address local = load_iaddress(Rlocal_index, Rtemp);
2499   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2500   __ profile_ret(Rtmp_save1, Rret_bci);
2501   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2502   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2503   __ add(Rbcp, Rtemp, Rret_bci);
2504   __ dispatch_next(vtos);
2505 }
2506 
2507 
2508 void TemplateTable::wide_ret() {
2509   transition(vtos, vtos);
2510   const Register Rlocal_index = R1_tmp;
2511   const Register Rret_bci = Rtmp_save0; // R4/R19
2512 
2513   locals_index_wide(Rlocal_index);
2514   Address local = load_iaddress(Rlocal_index, Rtemp);
2515   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2516   __ profile_ret(Rtmp_save1, Rret_bci);
2517   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2518   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2519   __ add(Rbcp, Rtemp, Rret_bci);
2520   __ dispatch_next(vtos);
2521 }
2522 
2523 
2524 void TemplateTable::tableswitch() {
2525   transition(itos, vtos);
2526 
2527   const Register Rindex  = R0_tos;
2528 #ifndef AARCH64
2529   const Register Rtemp2  = R1_tmp;
2530 #endif // !AARCH64
2531   const Register Rabcp   = R2_tmp;  // aligned bcp
2532   const Register Rlow    = R3_tmp;
2533   const Register Rhigh   = R4_tmp;
2534   const Register Roffset = R5_tmp;
2535 
2536   // align bcp
2537   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2538   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2539 
2540   // load lo & hi
2541 #ifdef AARCH64
2542   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2543 #else
2544   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2545 #endif // AARCH64
2546   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2547   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2548 
2549   // compare index with high bound
2550   __ cmp_32(Rhigh, Rindex);
2551 
2552 #ifdef AARCH64
2553   Label default_case, do_dispatch;
2554   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2555   __ b(default_case, lt);
2556 
2557   __ sub_w(Rindex, Rindex, Rlow);
2558   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2559   if(ProfileInterpreter) {
2560     __ sxtw(Rindex, Rindex);
2561     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2562   }
2563   __ b(do_dispatch);
2564 
2565   __ bind(default_case);
2566   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2567   if(ProfileInterpreter) {
2568     __ profile_switch_default(R0_tmp);
2569   }
2570 
2571   __ bind(do_dispatch);
2572 #else
2573 
2574   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2575   __ subs(Rindex, Rindex, Rlow, ge);
2576 
2577   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2578   // ("ge" status accumulated from cmp and subs instructions) then load
2579   // offset from table, otherwise load offset for default case
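  // Rough C sketch of the selection below (illustration only):
  //   if (index <= high && (index - low) >= 0)
  //     offset = table[index - low];        // table starts right after lo/hi at Rabcp
  //   else
  //     offset = default_offset;            // stored at Rabcp - 3*BytesPerInt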
2580 
2581   if(ProfileInterpreter) {
2582     Label default_case, continue_execution;
2583 
2584     __ b(default_case, lt);
2585     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2586     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2587     __ b(continue_execution);
2588 
2589     __ bind(default_case);
2590     __ profile_switch_default(R0_tmp);
2591     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2592 
2593     __ bind(continue_execution);
2594   } else {
2595     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2596     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2597   }
2598 #endif // AARCH64
2599 
2600   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2601 
2602   // load the next bytecode to R3_bytecode and advance Rbcp
2603 #ifdef AARCH64
2604   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2605   __ ldrb(R3_bytecode, Address(Rbcp));
2606 #else
2607   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2608 #endif // AARCH64
2609   __ dispatch_only(vtos);
2610 
2611 }
2612 
2613 
2614 void TemplateTable::lookupswitch() {
2615   transition(itos, itos);
2616   __ stop("lookupswitch bytecode should have been rewritten");
2617 }
2618 
2619 
2620 void TemplateTable::fast_linearswitch() {
2621   transition(itos, vtos);
2622   Label loop, found, default_case, continue_execution;
2623 
2624   const Register Rkey     = R0_tos;
2625   const Register Rabcp    = R2_tmp;  // aligned bcp
2626   const Register Rdefault = R3_tmp;
2627   const Register Rcount   = R4_tmp;
2628   const Register Roffset  = R5_tmp;
2629 
2630   // bswap Rkey, so we can avoid bswapping the table entries
2631   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2632 
2633   // align bcp
2634   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2635   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2636 
2637   // load default & counter
2638 #ifdef AARCH64
2639   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2640 #else
2641   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2642 #endif // AARCH64
2643   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2644 
2645 #ifdef AARCH64
2646   __ cbz_w(Rcount, default_case);
2647 #else
2648   __ cmp_32(Rcount, 0);
2649   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2650   __ b(default_case, eq);
2651 #endif // AARCH64
2652 
2653   // table search
2654   __ bind(loop);
2655 #ifdef AARCH64
2656   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2657 #endif // AARCH64
2658   __ cmp_32(Rtemp, Rkey);
2659   __ b(found, eq);
2660   __ subs(Rcount, Rcount, 1);
2661 #ifndef AARCH64
2662   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2663 #endif // !AARCH64
2664   __ b(loop, ne);
2665 
2666   // default case
2667   __ bind(default_case);
2668   __ profile_switch_default(R0_tmp);
2669   __ mov(Roffset, Rdefault);
2670   __ b(continue_execution);
2671 
2672   // entry found -> get offset
2673   __ bind(found);
2674   // Rabcp is already incremented and points to the next entry
2675   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2676   if (ProfileInterpreter) {
2677     // Calculate index of the selected case.
2678     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2679 
2680     // align bcp
2681     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2682     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2683 
2684     // load number of cases
2685     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2686     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2687 
2688     // Selected index = <number of cases> - <current loop count>
2689     __ sub(R1_tmp, R2_tmp, Rcount);
2690     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2691   }
2692 
2693   // continue execution
2694   __ bind(continue_execution);
2695   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2696 
2697   // load the next bytecode to R3_bytecode and advance Rbcp
2698 #ifdef AARCH64
2699   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2700   __ ldrb(R3_bytecode, Address(Rbcp));
2701 #else
2702   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2703 #endif // AARCH64
2704   __ dispatch_only(vtos);
2705 }
2706 
2707 
2708 void TemplateTable::fast_binaryswitch() {
2709   transition(itos, vtos);
2710   // Implementation using the following core algorithm:
2711   //
2712   // int binary_search(int key, LookupswitchPair* array, int n) {
2713   //   // Binary search according to "Methodik des Programmierens" by
2714   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2715   //   int i = 0;
2716   //   int j = n;
2717   //   while (i+1 < j) {
2718   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2719   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2720   //     // where a stands for the array and assuming that the (non-existing)
2721   //     // element a[n] is infinitely big.
2722   //     int h = (i + j) >> 1;
2723   //     // i < h < j
2724   //     if (key < array[h].fast_match()) {
2725   //       j = h;
2726   //     } else {
2727   //       i = h;
2728   //     }
2729   //   }
2730   //   // R: a[i] <= key < a[i+1] or Q
2731   //   // (i.e., if key is within array, i is the correct index)
2732   //   return i;
2733   // }
2734 
2735   // register allocation
2736   const Register key    = R0_tos;                // already set (tosca)
2737   const Register array  = R1_tmp;
2738   const Register i      = R2_tmp;
2739   const Register j      = R3_tmp;
2740   const Register h      = R4_tmp;
2741   const Register val    = R5_tmp;
2742   const Register temp1  = Rtemp;
2743   const Register temp2  = LR_tmp;
2744   const Register offset = R3_tmp;
2745 
2746   // set 'array' = aligned bcp + 2 ints
2747   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2748   __ align_reg(array, temp1, BytesPerInt);
2749 
2750   // initialize i & j
2751   __ mov(i, 0);                                  // i = 0;
2752   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2753   // Convert j into native byte-ordering
2754   __ byteswap_u32(j, temp1, temp2);
2755 
2756   // and start
2757   Label entry;
2758   __ b(entry);
2759 
2760   // binary search loop
2761   { Label loop;
2762     __ bind(loop);
2763     // int h = (i + j) >> 1;
2764     __ add(h, i, j);                             // h = i + j;
2765     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2766     // if (key < array[h].fast_match()) {
2767     //   j = h;
2768     // } else {
2769     //   i = h;
2770     // }
2771 #ifdef AARCH64
2772     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2773     __ ldr_s32(val, Address(temp1));
2774 #else
2775     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2776 #endif // AARCH64
2777     // Convert array[h].match to native byte-ordering before compare
2778     __ byteswap_u32(val, temp1, temp2);
2779     __ cmp_32(key, val);
2780     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2781     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2782     // while (i+1 < j)
2783     __ bind(entry);
2784     __ add(temp1, i, 1);                             // i+1
2785     __ cmp(temp1, j);                                // i+1 < j
2786     __ b(loop, lt);
2787   }
2788 
2789   // end of binary search, result index is i (must check again!)
2790   Label default_case;
2791   // Convert array[i].match to native byte-ordering before compare
2792 #ifdef AARCH64
2793   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2794   __ ldr_s32(val, Address(temp1));
2795 #else
2796   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2797 #endif // AARCH64
2798   __ byteswap_u32(val, temp1, temp2);
2799   __ cmp_32(key, val);
2800   __ b(default_case, ne);
2801 
2802   // entry found
2803   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2804   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2805   __ profile_switch_case(R0, i, R1, i);
2806   __ byteswap_u32(offset, temp1, temp2);
2807 #ifdef AARCH64
2808   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2809   __ ldrb(R3_bytecode, Address(Rbcp));
2810 #else
2811   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2812 #endif // AARCH64
2813   __ dispatch_only(vtos);
2814 
2815   // default case
2816   __ bind(default_case);
2817   __ profile_switch_default(R0);
2818   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2819   __ byteswap_u32(offset, temp1, temp2);
2820 #ifdef AARCH64
2821   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2822   __ ldrb(R3_bytecode, Address(Rbcp));
2823 #else
2824   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2825 #endif // AARCH64
2826   __ dispatch_only(vtos);
2827 }
2828 
2829 
2830 void TemplateTable::_return(TosState state) {
2831   transition(state, state);
2832   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2833 
2834   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2835     Label skip_register_finalizer;
2836     assert(state == vtos, "only valid state");
2837     __ ldr(R1, aaddress(0));
2838     __ load_klass(Rtemp, R1);
2839     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2840     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2841 
2842     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2843 
2844     __ bind(skip_register_finalizer);
2845   }
2846 
2847   // Narrow result if state is itos but result type is smaller.
2848   // Need to narrow in the return bytecode rather than in generate_return_entry
2849   // since compiled code callers expect the result to already be narrowed.
2850   if (state == itos) {
2851     __ narrow(R0_tos);
2852   }
2853   __ remove_activation(state, LR);
2854 
2855   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2856 
2857 #ifndef AARCH64
2858   // According to interpreter calling conventions, result is returned in R0/R1,
2859   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2860   // This conversion should be done after remove_activation, as it uses
2861   // push(state) & pop(state) to preserve return value.
2862   __ convert_tos_to_retval(state);
2863 #endif // !AARCH64
2864 
2865   __ ret();
2866 
2867   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2868   __ nop();
2869 }
2870 
2871 
2872 // ----------------------------------------------------------------------------
2873 // Volatile variables demand their effects be made known to all CPUs in
2874 // order.  Store buffers on most chips allow reads & writes to reorder; the
2875 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2876 // memory barrier (i.e., it's not sufficient that the interpreter does not
2877 // reorder volatile references, the hardware also must not reorder them).
2878 //
2879 // According to the new Java Memory Model (JMM):
2880 // (1) All volatiles are serialized wrt each other.
2881 // ALSO reads & writes act as acquire & release, so:
2882 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2883 // the read float up to before the read.  It's OK for non-volatile memory refs
2884 // that happen before the volatile read to float down below it.
2885 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2886 // that happen BEFORE the write float down to after the write.  It's OK for
2887 // non-volatile memory refs that happen after the volatile write to float up
2888 // before it.
2889 //
2890 // We only put in barriers around volatile refs (they are expensive), not
2891 // _between_ memory refs (that would require us to track the flavor of the
2892 // previous memory refs).  Requirements (2) and (3) require some barriers
2893 // before volatile stores and after volatile loads.  These nearly cover
2894 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2895 // case is placed after volatile-stores although it could just as well go
2896 // before volatile-loads.
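// As an illustration only (not part of the generated code), barriers end up
// placed around volatile accesses roughly like this:
//
//   StoreStore|LoadStore barrier   // before a volatile store
//   <volatile store>
//   StoreLoad barrier              // also covers the volatile-store / volatile-load case
//
//   <volatile load>
//   LoadLoad|LoadStore barrier     // after a volatile load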
2897 // TODO-AARCH64: consider removing extra unused parameters
2898 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2899                                      Register tmp,
2900                                      bool preserve_flags,
2901                                      Register load_tgt) {
2902 #ifdef AARCH64
2903   __ membar(order_constraint);
2904 #else
2905   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2906 #endif
2907 }
2908 
2909 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2910 void TemplateTable::resolve_cache_and_index(int byte_no,
2911                                             Register Rcache,
2912                                             Register Rindex,
2913                                             size_t index_size) {
2914   assert_different_registers(Rcache, Rindex, Rtemp);
2915 
2916   Label resolved;
2917   Bytecodes::Code code = bytecode();
2918   switch (code) {
2919   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2920   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2921   }
2922 
2923   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2924   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2925   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2926   __ b(resolved, eq);
2927 
2928   // resolve first time through
2929   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2930   __ mov(R1, code);
2931   __ call_VM(noreg, entry, R1);
2932   // Update registers with resolved info
2933   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2934   __ bind(resolved);
2935 }
2936 
2937 
2938 // The Rcache and Rindex registers must be set before the call
2939 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2940                                               Register Rindex,
2941                                               Register Roffset,
2942                                               Register Rflags,
2943                                               Register Robj,
2944                                               bool is_static = false) {
2945 
2946   assert_different_registers(Rcache, Rindex, Rtemp);
2947   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2948 
2949   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2950 
2951   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2952 
2953   // Field offset
2954   __ ldr(Roffset, Address(Rtemp,
2955            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2956 
2957   // Flags
2958   __ ldr_u32(Rflags, Address(Rtemp,
2959            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2960 
2961   if (is_static) {
2962     __ ldr(Robj, Address(Rtemp,
2963              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2964     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2965     __ ldr(Robj, Address(Robj, mirror_offset));
2966     __ resolve_oop_handle(Robj);
2967   }
2968 }
2969 
2970 
2971 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2972 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2973                                                Register method,
2974                                                Register itable_index,
2975                                                Register flags,
2976                                                bool is_invokevirtual,
2977                                                bool is_invokevfinal/*unused*/,
2978                                                bool is_invokedynamic) {
2979   // setup registers
2980   const Register cache = R2_tmp;
2981   const Register index = R3_tmp;
2982   const Register temp_reg = Rtemp;
2983   assert_different_registers(cache, index, temp_reg);
2984   assert_different_registers(method, itable_index, temp_reg);
2985 
2986   // determine constant pool cache field offsets
2987   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2988   const int method_offset = in_bytes(
2989     ConstantPoolCache::base_offset() +
2990       ((byte_no == f2_byte)
2991        ? ConstantPoolCacheEntry::f2_offset()
2992        : ConstantPoolCacheEntry::f1_offset()
2993       )
2994     );
2995   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2996                                     ConstantPoolCacheEntry::flags_offset());
2997   // access constant pool cache fields
2998   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2999                                     ConstantPoolCacheEntry::f2_offset());
3000 
3001   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3002   resolve_cache_and_index(byte_no, cache, index, index_size);
3003     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3004     __ ldr(method, Address(temp_reg, method_offset));
3005 
3006   if (itable_index != noreg) {
3007     __ ldr(itable_index, Address(temp_reg, index_offset));
3008   }
3009   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3010 }
3011 
3012 
3013 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
3014 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3015 // except cache and index registers which are preserved.
3016 void TemplateTable::jvmti_post_field_access(Register Rcache,
3017                                             Register Rindex,
3018                                             bool is_static,
3019                                             bool has_tos) {
3020   assert_different_registers(Rcache, Rindex, Rtemp);
3021 
3022   if (__ can_post_field_access()) {
3023     // Check to see if a field access watch has been set before we take
3024     // the time to call into the VM.
3025 
3026     Label Lcontinue;
3027 
3028     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3029     __ cbz(Rtemp, Lcontinue);
3030 
3031     // cache entry pointer
3032     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3033     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3034     if (is_static) {
3035       __ mov(R1, 0);        // NULL object reference
3036     } else {
3037       __ pop(atos);         // Get the object
3038       __ mov(R1, R0_tos);
3039       __ verify_oop(R1);
3040       __ push(atos);        // Restore stack state
3041     }
3042     // R1: object pointer or NULL
3043     // R2: cache entry pointer
3044     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3045                R1, R2);
3046     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3047 
3048     __ bind(Lcontinue);
3049   }
3050 }
3051 
3052 
3053 void TemplateTable::pop_and_check_object(Register r) {
3054   __ pop_ptr(r);
3055   __ null_check(r, Rtemp);  // for field access must check obj.
3056   __ verify_oop(r);
3057 }
3058 
3059 
3060 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3061   transition(vtos, vtos);
3062 
3063   const Register Roffset  = R2_tmp;
3064   const Register Robj     = R3_tmp;
3065   const Register Rcache   = R4_tmp;
3066   const Register Rflagsav = Rtmp_save0;  // R4/R19
3067   const Register Rindex   = R5_tmp;
3068   const Register Rflags   = R5_tmp;
3069 
3070   const bool gen_volatile_check = os::is_MP();
3071 
3072   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3073   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3074   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3075 
3076   if (gen_volatile_check) {
3077     __ mov(Rflagsav, Rflags);
3078   }
3079 
3080   if (!is_static) pop_and_check_object(Robj);
3081 
3082   Label Done, Lint, Ltable, shouldNotReachHere;
3083   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3084 
3085   // compute type
3086   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3087   // Make sure we don't need to mask flags after the above shift
3088   ConstantPoolCacheEntry::verify_tos_state_shift();
3089 
3090   // There are actually two versions of implementation of getfield/getstatic:
3091   //
3092   // 32-bit ARM:
3093   // 1) Table switch using add(PC,...) instruction (fast_version)
3094   // 2) Table switch using ldr(PC,...) instruction
3095   //
3096   // AArch64:
3097   // 1) Table switch using adr/add/br instructions (fast_version)
3098   // 2) Table switch using adr/ldr/br instructions
3099   //
3100   // The first version requires a fixed-size code block for each case and
3101   // cannot be used in RewriteBytecodes and VerifyOops
3102   // modes.
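  // Rough shape of the two dispatch schemes on 32-bit ARM (illustration only,
  // the actual code is emitted below):
  //   fast: add PC, PC, tos_state << (log_max_block_size + LogInstructionSize)
  //         // falls into a fixed-size code block per case
  //   slow: ldr PC, [PC, tos_state << LogBytesPerWord]
  //         // indirects through the table of case addresses emitted at Ltable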
3103 
3104   // Size of fixed size code block for fast_version
3105   const int log_max_block_size = 2;
3106   const int max_block_size = 1 << log_max_block_size;
3107 
3108   // Decide if fast version is enabled
3109   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3110 
3111   // On 32-bit ARM the atos and itos cases can be merged only in the fast version, because
3112   // atos requires additional processing in the slow version.
3113   // On AArch64 atos and itos cannot be merged.
3114   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3115 
3116   assert(number_of_states == 10, "number of tos states should be equal to 10");
3117 
3118   __ cmp(Rflags, itos);
3119 #ifdef AARCH64
3120   __ b(Lint, eq);
3121 
3122   if(fast_version) {
3123     __ adr(Rtemp, Lbtos);
3124     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3125     __ br(Rtemp);
3126   } else {
3127     __ adr(Rtemp, Ltable);
3128     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3129     __ br(Rtemp);
3130   }
3131 #else
3132   if(atos_merged_with_itos) {
3133     __ cmp(Rflags, atos, ne);
3134   }
3135 
3136   // table switch by type
3137   if(fast_version) {
3138     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3139   } else {
3140     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3141   }
3142 
3143   // jump to itos/atos case
3144   __ b(Lint);
3145 #endif // AARCH64
3146 
3147   // table with addresses for slow version
3148   if (fast_version) {
3149     // nothing to do
3150   } else  {
3151     AARCH64_ONLY(__ align(wordSize));
3152     __ bind(Ltable);
3153     __ emit_address(Lbtos);
3154     __ emit_address(Lztos);
3155     __ emit_address(Lctos);
3156     __ emit_address(Lstos);
3157     __ emit_address(Litos);
3158     __ emit_address(Lltos);
3159     __ emit_address(Lftos);
3160     __ emit_address(Ldtos);
3161     __ emit_address(Latos);
3162   }
3163 
3164 #ifdef ASSERT
3165   int seq = 0;
3166 #endif
3167   // btos
3168   {
3169     assert(btos == seq++, "btos has unexpected value");
3170     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3171     __ bind(Lbtos);
3172     __ ldrsb(R0_tos, Address(Robj, Roffset));
3173     __ push(btos);
3174     // Rewrite bytecode to be faster
3175     if (!is_static && rc == may_rewrite) {
3176       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3177     }
3178     __ b(Done);
3179   }
3180 
3181   // ztos (same as btos for getfield)
3182   {
3183     assert(ztos == seq++, "ztos has unexpected value");
3184     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3185     __ bind(Lztos);
3186     __ ldrsb(R0_tos, Address(Robj, Roffset));
3187     __ push(ztos);
3188     // Rewrite bytecode to be faster (use btos fast getfield)
3189     if (!is_static && rc == may_rewrite) {
3190       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3191     }
3192     __ b(Done);
3193   }
3194 
3195   // ctos
3196   {
3197     assert(ctos == seq++, "ctos has unexpected value");
3198     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3199     __ bind(Lctos);
3200     __ ldrh(R0_tos, Address(Robj, Roffset));
3201     __ push(ctos);
3202     if (!is_static && rc == may_rewrite) {
3203       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3204     }
3205     __ b(Done);
3206   }
3207 
3208   // stos
3209   {
3210     assert(stos == seq++, "stos has unexpected value");
3211     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3212     __ bind(Lstos);
3213     __ ldrsh(R0_tos, Address(Robj, Roffset));
3214     __ push(stos);
3215     if (!is_static && rc == may_rewrite) {
3216       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3217     }
3218     __ b(Done);
3219   }
3220 
3221   // itos
3222   {
3223     assert(itos == seq++, "itos has unexpected value");
3224     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3225     __ bind(Litos);
3226     __ b(shouldNotReachHere);
3227   }
3228 
3229   // ltos
3230   {
3231     assert(ltos == seq++, "ltos has unexpected value");
3232     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3233     __ bind(Lltos);
3234 #ifdef AARCH64
3235     __ ldr(R0_tos, Address(Robj, Roffset));
3236 #else
3237     __ add(Roffset, Robj, Roffset);
3238     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3239 #endif // AARCH64
3240     __ push(ltos);
3241     if (!is_static && rc == may_rewrite) {
3242       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3243     }
3244     __ b(Done);
3245   }
3246 
3247   // ftos
3248   {
3249     assert(ftos == seq++, "ftos has unexpected value");
3250     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3251     __ bind(Lftos);
3252     // floats and ints are placed on the stack in the same way, so
3253     // we can use push(itos) to transfer value without using VFP
3254     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3255     __ push(itos);
3256     if (!is_static && rc == may_rewrite) {
3257       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3258     }
3259     __ b(Done);
3260   }
3261 
3262   // dtos
3263   {
3264     assert(dtos == seq++, "dtos has unexpected value");
3265     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3266     __ bind(Ldtos);
3267     // doubles and longs are placed on stack in the same way, so
3268     // we can use push(ltos) to transfer value without using VFP
3269 #ifdef AARCH64
3270     __ ldr(R0_tos, Address(Robj, Roffset));
3271 #else
3272     __ add(Rtemp, Robj, Roffset);
3273     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3274 #endif // AARCH64
3275     __ push(ltos);
3276     if (!is_static && rc == may_rewrite) {
3277       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3278     }
3279     __ b(Done);
3280   }
3281 
3282   // atos
3283   {
3284     assert(atos == seq++, "atos has unexpected value");
3285 
3286     // atos case for AArch64 and slow version on 32-bit ARM
3287     if(!atos_merged_with_itos) {
3288       __ bind(Latos);
3289       __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3290       __ push(atos);
3291       // Rewrite bytecode to be faster
3292       if (!is_static && rc == may_rewrite) {
3293         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3294       }
3295       __ b(Done);
3296     }
3297   }
3298 
3299   assert(vtos == seq++, "vtos has unexpected value");
3300 
3301   __ bind(shouldNotReachHere);
3302   __ should_not_reach_here();
3303 
3304   // itos and atos cases are frequent so it makes sense to move them out of table switch
3305   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3306 
3307   __ bind(Lint);
3308   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3309   __ push(itos);
3310   // Rewrite bytecode to be faster
3311   if (!is_static && rc == may_rewrite) {
3312     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3313   }
3314 
3315   __ bind(Done);
3316 
3317   if (gen_volatile_check) {
3318     // Check for volatile field
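    // Acquire semantics for a volatile read; roughly (a sketch, not the emitted code):
    //   value = obj->field;                          // one of the loads above
    //   if (volatile) membar(LoadLoad | LoadStore);  // later accesses may not float above the load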
3319     Label notVolatile;
3320     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3321 
3322     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3323 
3324     __ bind(notVolatile);
3325   }
3326 
3327 }
3328 
3329 void TemplateTable::getfield(int byte_no) {
3330   getfield_or_static(byte_no, false);
3331 }
3332 
3333 void TemplateTable::nofast_getfield(int byte_no) {
3334   getfield_or_static(byte_no, false, may_not_rewrite);
3335 }
3336 
3337 void TemplateTable::getstatic(int byte_no) {
3338   getfield_or_static(byte_no, true);
3339 }
3340 
3341 
3342 // The registers cache and index are expected to be set before the call, and must not be R1 or Rtemp.
3343 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3344 // except cache and index registers which are preserved.
3345 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3346   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3347   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3348 
3349   if (__ can_post_field_modification()) {
3350     // Check to see if a field modification watch has been set before we take
3351     // the time to call into the VM.
3352     Label Lcontinue;
3353 
3354     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3355     __ cbz(Rtemp, Lcontinue);
3356 
3357     if (is_static) {
3358       // Life is simple.  Null out the object pointer.
3359       __ mov(R1, 0);
3360     } else {
3361       // Life is harder. The stack holds the value on top, followed by the object.
3362       // We don't know the size of the value, though; it could be one or two words
3363       // depending on its type. As a result, we must find the type to determine where
3364       // the object is.
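      // A schematic of the expression stack here (Rstack_top points at the topmost
      // word; the exact word order within a long/double does not matter for
      // locating the object):
      //   one-word value:  expr(0) = value,    expr(1) = object
      //   two-word value:  expr(0..1) = value, expr(2) = object
      // which is why the object is loaded from expr_offset_in_bytes(1) or (2) below.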
3365 
3366       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3367       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3368 
3369       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3370       // Make sure we don't need to mask Rtemp after the above shift
3371       ConstantPoolCacheEntry::verify_tos_state_shift();
3372 
3373       __ cmp(Rtemp, ltos);
3374       __ cond_cmp(Rtemp, dtos, ne);
3375 #ifdef AARCH64
3376       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3377       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3378       __ mov(R1, Rtemp, eq);
3379       __ ldr(R1, Address(Rstack_top, R1));
3380 #else
3381       // two word value (ltos/dtos)
3382       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3383 
3384       // one word value (not ltos, dtos)
3385       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3386 #endif // AARCH64
3387     }
3388 
3389     // cache entry pointer
3390     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3391     __ add(R2, R2, in_bytes(cp_base_offset));
3392 
3393     // object (tos)
3394     __ mov(R3, Rstack_top);
3395 
3396     // R1: object pointer set up above (NULL if static)
3397     // R2: cache entry pointer
3398     // R3: value object on the stack
3399     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3400                R1, R2, R3);
3401     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3402 
3403     __ bind(Lcontinue);
3404   }
3405 }
3406 
3407 
3408 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3409   transition(vtos, vtos);
3410 
3411   const Register Roffset  = R2_tmp;
3412   const Register Robj     = R3_tmp;
3413   const Register Rcache   = R4_tmp;
3414   const Register Rflagsav = Rtmp_save0;  // R4/R19
3415   const Register Rindex   = R5_tmp;
3416   const Register Rflags   = R5_tmp;
3417 
3418   const bool gen_volatile_check = os::is_MP();
3419 
3420   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3421   jvmti_post_field_mod(Rcache, Rindex, is_static);
3422   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3423 
3424   if (gen_volatile_check) {
3425     // Check for volatile field
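    // The StoreStore|LoadStore barrier before a volatile store acts as a release:
    // earlier accesses may not be reordered after the store. The matching
    // StoreLoad barrier after the store is emitted at Done below.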
3426     Label notVolatile;
3427     __ mov(Rflagsav, Rflags);
3428     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3429 
3430     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3431 
3432     __ bind(notVolatile);
3433   }
3434 
3435   Label Done, Lint, shouldNotReachHere;
3436   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3437 
3438   // compute type
3439   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3440   // Make sure we don't need to mask flags after the above shift
3441   ConstantPoolCacheEntry::verify_tos_state_shift();
3442 
3443   // There are actually two versions of implementation of putfield/putstatic:
3444   //
3445   // 32-bit ARM:
3446   // 1) Table switch using add(PC,...) instruction (fast_version)
3447   // 2) Table switch using ldr(PC,...) instruction
3448   //
3449   // AArch64:
3450   // 1) Table switch using adr/add/br instructions (fast_version)
3451   // 2) Table switch using adr/ldr/br instructions
3452   //
3453   // The first version requires a fixed-size code block for each case and
3454   // therefore cannot be used when RewriteBytecodes or VerifyOops
3455   // is enabled.
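  // A rough sketch of the two dispatch schemes (illustration only, not the
  // emitted code):
  //   fast:  target = Lbtos + (tos_state << (log_max_block_size + LogInstructionSize));
  //   slow:  target = ((address*)Ltable)[tos_state];
  // i.e. the fast path relies on every case below being padded to max_block_size
  // instructions, while the slow path indirects through the Ltable address table.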
3456 
3457   // Size of fixed size code block for fast_version (in instructions)
3458   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3459   const int max_block_size = 1 << log_max_block_size;
3460 
3461   // Decide if fast version is enabled
3462   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3463 
3464   assert(number_of_states == 10, "number of tos states should be equal to 10");
3465 
3466   // itos case is frequent and is moved outside table switch
3467   __ cmp(Rflags, itos);
3468 
3469 #ifdef AARCH64
3470   __ b(Lint, eq);
3471 
3472   if (fast_version) {
3473     __ adr(Rtemp, Lbtos);
3474     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3475     __ br(Rtemp);
3476   } else {
3477     __ adr(Rtemp, Ltable);
3478     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3479     __ br(Rtemp);
3480   }
3481 #else
3482   // table switch by type
3483   if (fast_version) {
3484     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3485   } else  {
3486     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3487   }
3488 
3489   // jump to itos case
3490   __ b(Lint);
3491 #endif // AARCH64
3492 
3493   // table with addresses for slow version
3494   if (fast_version) {
3495     // nothing to do
3496   } else  {
3497     AARCH64_ONLY(__ align(wordSize));
3498     __ bind(Ltable);
3499     __ emit_address(Lbtos);
3500     __ emit_address(Lztos);
3501     __ emit_address(Lctos);
3502     __ emit_address(Lstos);
3503     __ emit_address(Litos);
3504     __ emit_address(Lltos);
3505     __ emit_address(Lftos);
3506     __ emit_address(Ldtos);
3507     __ emit_address(Latos);
3508   }
3509 
3510 #ifdef ASSERT
3511   int seq = 0;
3512 #endif
3513   // btos
3514   {
3515     assert(btos == seq++, "btos has unexpected value");
3516     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3517     __ bind(Lbtos);
3518     __ pop(btos);
3519     if (!is_static) pop_and_check_object(Robj);
3520     __ strb(R0_tos, Address(Robj, Roffset));
3521     if (!is_static && rc == may_rewrite) {
3522       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3523     }
3524     __ b(Done);
3525   }
3526 
3527   // ztos
3528   {
3529     assert(ztos == seq++, "ztos has unexpected value");
3530     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3531     __ bind(Lztos);
3532     __ pop(ztos);
3533     if (!is_static) pop_and_check_object(Robj);
3534     __ and_32(R0_tos, R0_tos, 1);
3535     __ strb(R0_tos, Address(Robj, Roffset));
3536     if (!is_static && rc == may_rewrite) {
3537       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3538     }
3539     __ b(Done);
3540   }
3541 
3542   // ctos
3543   {
3544     assert(ctos == seq++, "ctos has unexpected value");
3545     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3546     __ bind(Lctos);
3547     __ pop(ctos);
3548     if (!is_static) pop_and_check_object(Robj);
3549     __ strh(R0_tos, Address(Robj, Roffset));
3550     if (!is_static && rc == may_rewrite) {
3551       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3552     }
3553     __ b(Done);
3554   }
3555 
3556   // stos
3557   {
3558     assert(stos == seq++, "stos has unexpected value");
3559     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3560     __ bind(Lstos);
3561     __ pop(stos);
3562     if (!is_static) pop_and_check_object(Robj);
3563     __ strh(R0_tos, Address(Robj, Roffset));
3564     if (!is_static && rc == may_rewrite) {
3565       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3566     }
3567     __ b(Done);
3568   }
3569 
3570   // itos
3571   {
3572     assert(itos == seq++, "itos has unexpected value");
3573     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3574     __ bind(Litos);
3575     __ b(shouldNotReachHere);
3576   }
3577 
3578   // ltos
3579   {
3580     assert(ltos == seq++, "ltos has unexpected value");
3581     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3582     __ bind(Lltos);
3583     __ pop(ltos);
3584     if (!is_static) pop_and_check_object(Robj);
3585 #ifdef AARCH64
3586     __ str(R0_tos, Address(Robj, Roffset));
3587 #else
3588     __ add(Roffset, Robj, Roffset);
3589     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3590 #endif // AARCH64
3591     if (!is_static && rc == may_rewrite) {
3592       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3593     }
3594     __ b(Done);
3595   }
3596 
3597   // ftos
3598   {
3599     assert(ftos == seq++, "ftos has unexpected value");
3600     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3601     __ bind(Lftos);
3602     // floats and ints are placed on stack in the same way, so
3603     // we can use pop(itos) to transfer value without using VFP
3604     __ pop(itos);
3605     if (!is_static) pop_and_check_object(Robj);
3606     __ str_32(R0_tos, Address(Robj, Roffset));
3607     if (!is_static && rc == may_rewrite) {
3608       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3609     }
3610     __ b(Done);
3611   }
3612 
3613   // dtos
3614   {
3615     assert(dtos == seq++, "dtos has unexpected value");
3616     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3617     __ bind(Ldtos);
3618     // doubles and longs are placed on stack in the same way, so
3619     // we can use pop(ltos) to transfer value without using VFP
3620     __ pop(ltos);
3621     if (!is_static) pop_and_check_object(Robj);
3622 #ifdef AARCH64
3623     __ str(R0_tos, Address(Robj, Roffset));
3624 #else
3625     __ add(Rtemp, Robj, Roffset);
3626     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3627 #endif // AARCH64
3628     if (!is_static && rc == may_rewrite) {
3629       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3630     }
3631     __ b(Done);
3632   }
3633 
3634   // atos
3635   {
3636     assert(atos == seq++, "atos has unexpected value");
3637     __ bind(Latos);
3638     __ pop(atos);
3639     if (!is_static) pop_and_check_object(Robj);
3640     // Store into the field
3641     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false);
3642     if (!is_static && rc == may_rewrite) {
3643       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3644     }
3645     __ b(Done);
3646   }
3647 
3648   __ bind(shouldNotReachHere);
3649   __ should_not_reach_here();
3650 
3651   // itos case is frequent and is moved outside table switch
3652   __ bind(Lint);
3653   __ pop(itos);
3654   if (!is_static) pop_and_check_object(Robj);
3655   __ str_32(R0_tos, Address(Robj, Roffset));
3656   if (!is_static && rc == may_rewrite) {
3657     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3658   }
3659 
3660   __ bind(Done);
3661 
3662   if (gen_volatile_check) {
3663     Label notVolatile;
3664     if (is_static) {
3665       // Just check for volatile. Memory barrier for static final field
3666       // is handled by class initialization.
3667       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3668       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3669       __ bind(notVolatile);
3670     } else {
3671       // Check for volatile field and final field
3672       Label skipMembar;
3673 
3674       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3675                        1 << ConstantPoolCacheEntry::is_final_shift);
3676       __ b(skipMembar, eq);
3677 
3678       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3679 
3680       // StoreLoad barrier after volatile field write
3681       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3682       __ b(skipMembar);
3683 
3684       // StoreStore barrier after final field write
3685       __ bind(notVolatile);
3686       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3687 
3688       __ bind(skipMembar);
3689     }
3690   }
3691 
3692 }
3693 
3694 void TemplateTable::putfield(int byte_no) {
3695   putfield_or_static(byte_no, false);
3696 }
3697 
3698 void TemplateTable::nofast_putfield(int byte_no) {
3699   putfield_or_static(byte_no, false, may_not_rewrite);
3700 }
3701 
3702 void TemplateTable::putstatic(int byte_no) {
3703   putfield_or_static(byte_no, true);
3704 }
3705 
3706 
3707 void TemplateTable::jvmti_post_fast_field_mod() {
3708   // This version of jvmti_post_fast_field_mod() is not used on ARM
3709   Unimplemented();
3710 }
3711 
3712 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3713 // but preserves tosca with the given state.
3714 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3715   if (__ can_post_field_modification()) {
3716     // Check to see if a field modification watch has been set before we take
3717     // the time to call into the VM.
3718     Label done;
3719 
3720     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3721     __ cbz(R2, done);
3722 
3723     __ pop_ptr(R3);               // copy the object pointer from tos
3724     __ verify_oop(R3);
3725     __ push_ptr(R3);              // put the object pointer back on tos
3726 
3727     __ push(state);               // save value on the stack
3728 
3729     // access constant pool cache entry
3730     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3731 
3732     __ mov(R1, R3);
3733     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3734     __ mov(R3, Rstack_top); // put tos addr into R3
3735 
3736     // R1: object pointer copied above
3737     // R2: cache entry pointer
3738     // R3: jvalue object on the stack
3739     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3740 
3741     __ pop(state);                // restore value
3742 
3743     __ bind(done);
3744   }
3745 }
3746 
3747 
3748 void TemplateTable::fast_storefield(TosState state) {
3749   transition(state, vtos);
3750 
3751   ByteSize base = ConstantPoolCache::base_offset();
3752 
3753   jvmti_post_fast_field_mod(state);
3754 
3755   const Register Rcache  = R2_tmp;
3756   const Register Rindex  = R3_tmp;
3757   const Register Roffset = R3_tmp;
3758   const Register Rflags  = Rtmp_save0; // R4/R19
3759   const Register Robj    = R5_tmp;
3760 
3761   const bool gen_volatile_check = os::is_MP();
3762 
3763   // access constant pool cache
3764   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3765 
3766   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3767 
3768   if (gen_volatile_check) {
3769     // load flags to test volatile
3770     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3771   }
3772 
3773   // replace index with field offset from cache entry
3774   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3775 
3776   if (gen_volatile_check) {
3777     // Check for volatile store
3778     Label notVolatile;
3779     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3780 
3781     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3782     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3783 
3784     __ bind(notVolatile);
3785   }
3786 
3787   // Get object from stack
3788   pop_and_check_object(Robj);
3789 
3790   // access field
3791   switch (bytecode()) {
3792     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3793                                      // fall through
3794     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3795     case Bytecodes::_fast_sputfield: // fall through
3796     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3797     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3798 #ifdef AARCH64
3799     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3800     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3801     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3802 #else
3803     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3804                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3805 
3806 #ifdef __SOFTFP__
3807     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3808     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3809                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3810 #else
3811     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3812                                      __ fsts(S0_tos, Address(Robj));          break;
3813     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3814                                      __ fstd(D0_tos, Address(Robj));          break;
3815 #endif // __SOFTFP__
3816 #endif // AARCH64
3817 
3818     case Bytecodes::_fast_aputfield:
3819       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false);
3820       break;
3821 
3822     default:
3823       ShouldNotReachHere();
3824   }
3825 
3826   if (gen_volatile_check) {
3827     Label notVolatile;
3828     Label skipMembar;
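    // Barrier selection below mirrors putfield_or_static:
    //   volatile                  -> StoreLoad barrier after the store
    //   final (and not volatile)  -> StoreStore barrier after the store
    //   neither                   -> no barrier (branch to skipMembar)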
3829     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3830                    1 << ConstantPoolCacheEntry::is_final_shift);
3831     __ b(skipMembar, eq);
3832 
3833     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3834 
3835     // StoreLoad barrier after volatile field write
3836     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3837     __ b(skipMembar);
3838 
3839     // StoreStore barrier after final field write
3840     __ bind(notVolatile);
3841     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3842 
3843     __ bind(skipMembar);
3844   }
3845 }
3846 
3847 
3848 void TemplateTable::fast_accessfield(TosState state) {
3849   transition(atos, state);
3850 
3851   // do the JVMTI work here to avoid disturbing the register state below
3852   if (__ can_post_field_access()) {
3853     // Check to see if a field access watch has been set before we take
3854     // the time to call into the VM.
3855     Label done;
3856     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3857     __ cbz(R2, done);
3858     // access constant pool cache entry
3859     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3860     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3861     __ verify_oop(R0_tos);
3862     __ mov(R1, R0_tos);
3863     // R1: object pointer copied above
3864     // R2: cache entry pointer
3865     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3866     __ pop_ptr(R0_tos);   // restore object pointer
3867 
3868     __ bind(done);
3869   }
3870 
3871   const Register Robj    = R0_tos;
3872   const Register Rcache  = R2_tmp;
3873   const Register Rflags  = R2_tmp;
3874   const Register Rindex  = R3_tmp;
3875   const Register Roffset = R3_tmp;
3876 
3877   const bool gen_volatile_check = os::is_MP();
3878 
3879   // access constant pool cache
3880   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3881   // replace index with field offset from cache entry
3882   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3883   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3884 
3885   if (gen_volatile_check) {
3886     // load flags to test volatile
3887     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3888   }
3889 
3890   __ verify_oop(Robj);
3891   __ null_check(Robj, Rtemp);
3892 
3893   // access field
3894   switch (bytecode()) {
3895     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3896     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3897     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3898     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3899 #ifdef AARCH64
3900     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3901     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3902     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3903 #else
3904     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3905                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3906 #ifdef __SOFTFP__
3907     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3908     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3909                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3910 #else
3911     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3912     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3913 #endif // __SOFTFP__
3914 #endif // AARCH64
3915     case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3916     default:
3917       ShouldNotReachHere();
3918   }
3919 
3920   if (gen_volatile_check) {
3921     // Check for volatile load
3922     Label notVolatile;
3923     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3924 
3925     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3926     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3927 
3928     __ bind(notVolatile);
3929   }
3930 }
3931 
3932 
3933 void TemplateTable::fast_xaccess(TosState state) {
3934   transition(vtos, state);
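  // This handles the _fast_Xaccess_0 bytecodes, i.e. an aload_0 fused with a
  // fast getfield: the receiver is local 0 and the field's constant pool cache
  // index sits two bytes into the instruction (hence bcp offset 2 below).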
3935 
3936   const Register Robj = R1_tmp;
3937   const Register Rcache = R2_tmp;
3938   const Register Rindex = R3_tmp;
3939   const Register Roffset = R3_tmp;
3940   const Register Rflags = R4_tmp;
3941   Label done;
3942 
3943   // get receiver
3944   __ ldr(Robj, aaddress(0));
3945 
3946   // access constant pool cache
3947   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3948   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3949   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3950 
3951   const bool gen_volatile_check = os::is_MP();
3952 
3953   if (gen_volatile_check) {
3954     // load flags to test volatile
3955     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3956   }
3957 
3958   // make sure exception is reported in correct bcp range (getfield is next instruction)
3959   __ add(Rbcp, Rbcp, 1);
3960   __ null_check(Robj, Rtemp);
3961   __ sub(Rbcp, Rbcp, 1);
3962 
3963 #ifdef AARCH64
3964   if (gen_volatile_check) {
3965     Label notVolatile;
3966     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3967 
3968     __ add(Rtemp, Robj, Roffset);
3969 
3970     if (state == itos) {
3971       __ ldar_w(R0_tos, Rtemp);
3972     } else if (state == atos) {
3973       if (UseCompressedOops) {
3974         __ ldar_w(R0_tos, Rtemp);
3975         __ decode_heap_oop(R0_tos);
3976       } else {
3977         __ ldar(R0_tos, Rtemp);
3978       }
3979       __ verify_oop(R0_tos);
3980     } else if (state == ftos) {
3981       __ ldar_w(R0_tos, Rtemp);
3982       __ fmov_sw(S0_tos, R0_tos);
3983     } else {
3984       ShouldNotReachHere();
3985     }
3986     __ b(done);
3987 
3988     __ bind(notVolatile);
3989   }
3990 #endif // AARCH64
3991 
3992   if (state == itos) {
3993     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3994   } else if (state == atos) {
3995     __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3996     __ verify_oop(R0_tos);
3997   } else if (state == ftos) {
3998 #ifdef AARCH64
3999     __ ldr_s(S0_tos, Address(Robj, Roffset));
4000 #else
4001 #ifdef __SOFTFP__
4002     __ ldr(R0_tos, Address(Robj, Roffset));
4003 #else
4004     __ add(Roffset, Robj, Roffset);
4005     __ flds(S0_tos, Address(Roffset));
4006 #endif // __SOFTFP__
4007 #endif // AARCH64
4008   } else {
4009     ShouldNotReachHere();
4010   }
4011 
4012 #ifndef AARCH64
4013   if (gen_volatile_check) {
4014     // Check for volatile load
4015     Label notVolatile;
4016     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4017 
4018     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4019 
4020     __ bind(notVolatile);
4021   }
4022 #endif // !AARCH64
4023 
4024   __ bind(done);
4025 }
4026 
4027 
4028 
4029 //----------------------------------------------------------------------------------------------------
4030 // Calls
4031 
4032 void TemplateTable::count_calls(Register method, Register temp) {
4033   // implemented elsewhere
4034   ShouldNotReachHere();
4035 }
4036 
4037 
4038 void TemplateTable::prepare_invoke(int byte_no,
4039                                    Register method,  // linked method (or i-klass)
4040                                    Register index,   // itable index, MethodType, etc.
4041                                    Register recv,    // if caller wants to see it
4042                                    Register flags    // if caller wants to test it
4043                                    ) {
4044   // determine flags
4045   const Bytecodes::Code code = bytecode();
4046   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4047   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4048   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4049   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4050   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4051   const bool load_receiver       = (recv != noreg);
4052   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4053   assert(recv  == noreg || recv  == R2, "");
4054   assert(flags == noreg || flags == R3, "");
4055 
4056   // setup registers & access constant pool cache
4057   if (recv  == noreg)  recv  = R2;
4058   if (flags == noreg)  flags = R3;
4059   const Register temp = Rtemp;
4060   const Register ret_type = R1_tmp;
4061   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4062 
4063   // save 'interpreter return address'
4064   __ save_bcp();
4065 
4066   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4067 
4068   // maybe push extra argument
4069   if (is_invokedynamic || is_invokehandle) {
4070     Label L_no_push;
4071     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4072     __ mov(temp, index);
4073     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4074     __ load_resolved_reference_at_index(index, temp);
4075     __ verify_oop(index);
4076     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4077     __ bind(L_no_push);
4078   }
4079 
4080   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4081   if (load_receiver) {
4082     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4083     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4084     __ ldr(recv, recv_addr);
4085     __ verify_oop(recv);
4086   }
4087 
4088   // compute return type
4089   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4090   // Make sure we don't need to mask flags after the above shift
4091   ConstantPoolCacheEntry::verify_tos_state_shift();
4092   // load return address
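  // (one pointer-sized entry per tos state; roughly LR = table[ret_type])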
4093   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4094     __ mov_slow(temp, table);
4095     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4096   }
4097 }
4098 
4099 
4100 void TemplateTable::invokevirtual_helper(Register index,
4101                                          Register recv,
4102                                          Register flags) {
4103 
4104   const Register recv_klass = R2_tmp;
4105 
4106   assert_different_registers(index, recv, flags, Rtemp);
4107   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4108 
4109   // Test for an invoke of a final method
4110   Label notFinal;
4111   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4112 
4113   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4114 
4115   // do the call - the index is actually the method to call
4116 
4117   // It's final, need a null check here!
4118   __ null_check(recv, Rtemp);
4119 
4120   // profile this call
4121   __ profile_final_call(R0_tmp);
4122 
4123   __ jump_from_interpreted(Rmethod);
4124 
4125   __ bind(notFinal);
4126 
4127   // get receiver klass
4128   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4129   __ load_klass(recv_klass, recv);
4130 
4131   // profile this call
4132   __ profile_virtual_call(R0_tmp, recv_klass);
4133 
4134   // get target Method* & entry point
4135   const int base = in_bytes(Klass::vtable_start_offset());
4136   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
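  // Roughly: Rmethod = *(Method**)(recv_klass + vtable_start_offset
  //                                + index * wordSize + method_offset_in_bytes)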
4137   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4138   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4139   __ jump_from_interpreted(Rmethod);
4140 }
4141 
4142 void TemplateTable::invokevirtual(int byte_no) {
4143   transition(vtos, vtos);
4144   assert(byte_no == f2_byte, "use this argument");
4145 
4146   const Register Rrecv  = R2_tmp;
4147   const Register Rflags = R3_tmp;
4148 
4149   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4150 
4151   // Rmethod: index
4152   // Rrecv:   receiver
4153   // Rflags:  flags
4154   // LR:      return address
4155 
4156   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4157 }
4158 
4159 
4160 void TemplateTable::invokespecial(int byte_no) {
4161   transition(vtos, vtos);
4162   assert(byte_no == f1_byte, "use this argument");
4163   const Register Rrecv  = R2_tmp;
4164   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4165   __ verify_oop(Rrecv);
4166   __ null_check(Rrecv, Rtemp);
4167   // do the call
4168   __ profile_call(Rrecv);
4169   __ jump_from_interpreted(Rmethod);
4170 }
4171 
4172 
4173 void TemplateTable::invokestatic(int byte_no) {
4174   transition(vtos, vtos);
4175   assert(byte_no == f1_byte, "use this argument");
4176   prepare_invoke(byte_no, Rmethod);
4177   // do the call
4178   __ profile_call(R2_tmp);
4179   __ jump_from_interpreted(Rmethod);
4180 }
4181 
4182 
4183 void TemplateTable::fast_invokevfinal(int byte_no) {
4184   transition(vtos, vtos);
4185   assert(byte_no == f2_byte, "use this argument");
4186   __ stop("fast_invokevfinal is not used on ARM");
4187 }
4188 
4189 
4190 void TemplateTable::invokeinterface(int byte_no) {
4191   transition(vtos, vtos);
4192   assert(byte_no == f1_byte, "use this argument");
4193 
4194   const Register Ritable = R1_tmp;
4195   const Register Rrecv   = R2_tmp;
4196   const Register Rinterf = R5_tmp;
4197   const Register Rindex  = R4_tmp;
4198   const Register Rflags  = R3_tmp;
4199   const Register Rklass  = R3_tmp;
4200 
4201   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4202 
4203   // Special case of invokeinterface called for virtual method of
4204   // java.lang.Object.  See cpCacheOop.cpp for details.
4205   // This code isn't produced by javac, but could be produced by
4206   // another compliant java compiler.
4207   Label notMethod;
4208   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4209 
4210   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4211   __ bind(notMethod);
4212 
4213   // Get receiver klass into Rklass - also a null check
4214   __ load_klass(Rklass, Rrecv);
4215 
4216   Label no_such_interface;
4217 
4218   // Receiver subtype check against REFC.
4219   __ lookup_interface_method(// inputs: rec. class, interface
4220                              Rklass, Rinterf, noreg,
4221                              // outputs:  scan temp. reg1, scan temp. reg2
4222                              noreg, Ritable, Rtemp,
4223                              no_such_interface);
4224 
4225   // profile this call
4226   __ profile_virtual_call(R0_tmp, Rklass);
4227 
4228   // Get declaring interface class from method
4229   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4230   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4231   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4232 
4233   // Get itable index from method
4234   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4235   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
4236   __ neg(Rindex, Rtemp);
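  // Rindex now holds Method::itable_index_max - <stored value>, i.e. the real
  // itable index (the add/neg pair avoids a subtract whose immediate would not
  // encode on arm32). The second lookup below uses it to fetch the Method* from
  // the itable of the declaring interface; the first lookup only verified that
  // the receiver class implements the reference class (REFC).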
4237 
4238   __ lookup_interface_method(// inputs: rec. class, interface
4239                              Rklass, Rinterf, Rindex,
4240                              // outputs:  scan temp. reg1, scan temp. reg2
4241                              Rmethod, Ritable, Rtemp,
4242                              no_such_interface);
4243 
4244   // Rmethod: Method* to call
4245 
4246   // Check for abstract method error
4247   // Note: This should be done more efficiently via a throw_abstract_method_error
4248   //       interpreter entry point and a conditional jump to it in case of a null
4249   //       method.
4250   { Label L;
4251     __ cbnz(Rmethod, L);
4252     // throw exception
4253     // note: must restore interpreter registers to canonical
4254     //       state for exception handling to work correctly!
4255     __ restore_method();
4256     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4257     // the call_VM checks for exception, so we should never return here.
4258     __ should_not_reach_here();
4259     __ bind(L);
4260   }
4261 
4262   // do the call
4263   __ jump_from_interpreted(Rmethod);
4264 
4265   // throw exception
4266   __ bind(no_such_interface);
4267   __ restore_method();
4268   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4269   // the call_VM checks for exception, so we should never return here.
4270   __ should_not_reach_here();
4271 }
4272 
4273 void TemplateTable::invokehandle(int byte_no) {
4274   transition(vtos, vtos);
4275 
4276   // TODO-AARCH64 review register usage
4277   const Register Rrecv  = R2_tmp;
4278   const Register Rmtype = R4_tmp;
4279   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4280 
4281   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4282   __ null_check(Rrecv, Rtemp);
4283 
4284   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4285   // Rmethod: MH.invokeExact_MT method (from f2)
4286 
4287   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4288 
4289   // do the call
4290   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4291   __ mov(Rmethod, R5_method);
4292   __ jump_from_interpreted(Rmethod);
4293 }
4294 
4295 void TemplateTable::invokedynamic(int byte_no) {
4296   transition(vtos, vtos);
4297 
4298   // TODO-AARCH64 review register usage
4299   const Register Rcallsite = R4_tmp;
4300   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4301 
4302   prepare_invoke(byte_no, R5_method, Rcallsite);
4303 
4304   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4305   // Rmethod:   MH.linkToCallSite method (from f2)
4306 
4307   // Note:  Rcallsite is already pushed by prepare_invoke
4308 
4309   if (ProfileInterpreter) {
4310     __ profile_call(R2_tmp);
4311   }
4312 
4313   // do the call
4314   __ mov(Rmethod, R5_method);
4315   __ jump_from_interpreted(Rmethod);
4316 }
4317 
4318 //----------------------------------------------------------------------------------------------------
4319 // Allocation
4320 
4321 void TemplateTable::_new() {
4322   transition(vtos, atos);
4323 
4324   const Register Robj   = R0_tos;
4325   const Register Rcpool = R1_tmp;
4326   const Register Rindex = R2_tmp;
4327   const Register Rtags  = R3_tmp;
4328   const Register Rsize  = R3_tmp;
4329 
4330   Register Rklass = R4_tmp;
4331   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4332   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4333 
4334   Label slow_case;
4335   Label done;
4336   Label initialize_header;
4337   Label initialize_object;  // including clearing the fields
4338 
4339   const bool allow_shared_alloc =
4340     Universe::heap()->supports_inline_contig_alloc();
4341 
4342   // Literals
4343   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4344 
4345   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4346   __ get_cpool_and_tags(Rcpool, Rtags);
4347 
4348   // Make sure the class we're about to instantiate has been resolved.
4349   // This is done before loading the InstanceKlass to be consistent with the order
4350   // in which the ConstantPool is updated (see ConstantPool::klass_at_put)
4351   const int tags_offset = Array<u1>::base_offset_in_bytes();
4352   __ add(Rtemp, Rtags, Rindex);
4353 
4354 #ifdef AARCH64
4355   __ add(Rtemp, Rtemp, tags_offset);
4356   __ ldarb(Rtemp, Rtemp);
4357 #else
4358   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4359 
4360   // use Rklass as a scratch
4361   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4362 #endif // AARCH64
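  // The resolved-class tag must be observed before the resolved klass is read:
  // the acquiring ldarb (AArch64) or the explicit LoadLoad barrier (32-bit ARM)
  // above orders the two loads.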
4363 
4364   // get InstanceKlass
4365   __ cmp(Rtemp, JVM_CONSTANT_Class);
4366   __ b(slow_case, ne);
4367   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4368 
4369   // make sure klass is initialized & doesn't have finalizer
4370   // make sure klass is fully initialized
4371   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4372   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4373   __ b(slow_case, ne);
4374 
4375   // get instance_size in InstanceKlass (scaled to a count of bytes)
4376   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4377 
4378   // test to see if it has a finalizer or is malformed in some way
4379   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4380   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4381 
4382   // Allocate the instance:
4383   //  If TLAB is enabled:
4384   //    Try to allocate in the TLAB.
4385   //    If fails, go to the slow path.
4386   //  Else If inline contiguous allocations are enabled:
4387   //    Try to allocate in eden.
4388   //    If fails due to heap end, go to slow path.
4389   //
4390   //  If TLAB is enabled OR inline contiguous is enabled:
4391   //    Initialize the allocation.
4392   //    Exit.
4393   //
4394   //  Go to slow path.
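  // Both fast paths are bump-the-pointer allocations; a rough sketch, ignoring
  // the atomic update needed for the shared eden:
  //   new_top = top + instance_size;
  //   if (new_top > end) goto slow_case;
  //   top = new_top;                      // object starts at the old top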
4395   if (UseTLAB) {
4396     const Register Rtlab_top = R1_tmp;
4397     const Register Rtlab_end = R2_tmp;
4398     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4399 
4400     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4401     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_current_end_offset())));
4402     __ add(Rtlab_top, Robj, Rsize);
4403     __ cmp(Rtlab_top, Rtlab_end);
4404     __ b(slow_case, hi);
4405     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4406     if (ZeroTLAB) {
4407       // the fields have been already cleared
4408       __ b(initialize_header);
4409     } else {
4410       // initialize both the header and fields
4411       __ b(initialize_object);
4412     }
4413   } else {
4414     // Allocation in the shared Eden, if allowed.
4415     if (allow_shared_alloc) {
4416       const Register Rheap_top_addr = R2_tmp;
4417       const Register Rheap_top = R5_tmp;
4418       const Register Rheap_end = Rtemp;
4419       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4420 
4421       // Rheap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4422       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4423 
4424       Label retry;
4425       __ bind(retry);
4426 
4427 #ifdef AARCH64
4428       __ ldxr(Robj, Rheap_top_addr);
4429 #else
4430       __ ldr(Robj, Address(Rheap_top_addr));
4431 #endif // AARCH64
4432 
4433       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4434       __ add(Rheap_top, Robj, Rsize);
4435       __ cmp(Rheap_top, Rheap_end);
4436       __ b(slow_case, hi);
4437 
4438       // Update heap top atomically.
4439       // If someone beats us on the allocation, try again, otherwise continue.
4440 #ifdef AARCH64
4441       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4442       __ cbnz_w(Rtemp2, retry);
4443 #else
4444       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4445       __ b(retry, ne);
4446 #endif // AARCH64
4447 
4448       __ incr_allocated_bytes(Rsize, Rtemp);
4449     }
4450   }
4451 
4452   if (UseTLAB || allow_shared_alloc) {
4453     const Register Rzero0 = R1_tmp;
4454     const Register Rzero1 = R2_tmp;
4455     const Register Rzero_end = R5_tmp;
4456     const Register Rzero_cur = Rtemp;
4457     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4458 
4459     // The object fields are cleared before the header is written.  If there is
4460     // nothing to clear beyond the header, go directly to the header initialization.
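    // i.e. clear (instance_size - sizeof(oopDesc)) bytes starting at
    // Robj + sizeof(oopDesc); the remaining size is a multiple of 8 (asserted below).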
4461     __ bind(initialize_object);
4462     __ subs(Rsize, Rsize, sizeof(oopDesc));
4463     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4464     __ b(initialize_header, eq);
4465 
4466 #ifdef ASSERT
4467     // make sure Rsize is a multiple of 8
4468     Label L;
4469     __ tst(Rsize, 0x07);
4470     __ b(L, eq);
4471     __ stop("object size is not multiple of 8 - adjust this code");
4472     __ bind(L);
4473 #endif
4474 
4475 #ifdef AARCH64
4476     {
4477       Label loop;
4478       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4479       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4480       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4481       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4482 
4483       // Zero by 2 words per iteration.
4484       __ bind(loop);
4485       __ subs(Rsize, Rsize, 2*wordSize);
4486       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4487       __ b(loop, gt);
4488     }
4489 #else
4490     __ mov(Rzero0, 0);
4491     __ mov(Rzero1, 0);
4492     __ add(Rzero_end, Rzero_cur, Rsize);
4493 
4494     // initialize remaining object fields: Rsize was a multiple of 8
4495     { Label loop;
4496       // loop is unrolled 2 times
4497       __ bind(loop);
4498       // #1
4499       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4500       __ cmp(Rzero_cur, Rzero_end);
4501       // #2
4502       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4503       __ cmp(Rzero_cur, Rzero_end, ne);
4504       __ b(loop, ne);
4505     }
4506 #endif // AARCH64
4507 
4508     // initialize object header only.
4509     __ bind(initialize_header);
4510     if (UseBiasedLocking) {
4511       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4512     } else {
4513       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4514     }
4515     // mark
4516     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4517 
4518     // klass
4519 #ifdef AARCH64
4520     __ store_klass_gap(Robj);
4521 #endif // AARCH64
4522     __ store_klass(Rklass, Robj); // blows Rklass:
4523     Rklass = noreg;
4524 
4525     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4526     if (DTraceAllocProbes) {
4527       // Trigger dtrace event for fastpath
4528       Label Lcontinue;
4529 
4530       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4531       __ cbz(Rtemp, Lcontinue);
4532 
4533       __ push(atos);
4534       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4535       __ pop(atos);
4536 
4537       __ bind(Lcontinue);
4538     }
4539 
4540     __ b(done);
4541   } else {
4542     // jump over literals
4543     __ b(slow_case);
4544   }
4545 
4546   if (allow_shared_alloc) {
4547     __ bind_literal(Lheap_top_addr);
4548   }
4549 
4550   // slow case
4551   __ bind(slow_case);
4552   __ get_constant_pool(Rcpool);
4553   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4554   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4555 
4556   // continue
4557   __ bind(done);
4558 
4559   // StoreStore barrier required after complete initialization
4560   // (headers + content zeroing), before the object may escape.
4561   __ membar(MacroAssembler::StoreStore, R1_tmp);
4562 }
4563 
4564 
4565 void TemplateTable::newarray() {
4566   transition(itos, atos);
4567   __ ldrb(R1, at_bcp(1));
4568   __ mov(R2, R0_tos);
4569   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4570   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4571 }
4572 
4573 
4574 void TemplateTable::anewarray() {
4575   transition(itos, atos);
4576   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4577   __ get_constant_pool(R1);
4578   __ mov(R3, R0_tos);
4579   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4580   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4581 }
4582 
4583 
4584 void TemplateTable::arraylength() {
4585   transition(atos, itos);
4586   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4587   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4588 }
4589 
4590 
4591 void TemplateTable::checkcast() {
4592   transition(atos, atos);
4593   Label done, is_null, quicked, resolved, throw_exception;
4594 
4595   const Register Robj = R0_tos;
4596   const Register Rcpool = R2_tmp;
4597   const Register Rtags = R3_tmp;
4598   const Register Rindex = R4_tmp;
4599   const Register Rsuper = R3_tmp;
4600   const Register Rsub   = R4_tmp;
4601   const Register Rsubtype_check_tmp1 = R1_tmp;
4602   const Register Rsubtype_check_tmp2 = LR_tmp;
4603 
4604   __ cbz(Robj, is_null);
4605 
4606   // Get cpool & tags index
4607   __ get_cpool_and_tags(Rcpool, Rtags);
4608   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4609 
4610   // See if bytecode has already been quicked
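  // ("quicked" = the constant pool entry has already been resolved to a class;
  // otherwise we call into the runtime, quicken_io_cc, to resolve it)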
4611   __ add(Rtemp, Rtags, Rindex);
4612 #ifdef AARCH64
4613   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4614   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4615   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4616 #else
4617   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4618 #endif // AARCH64
4619 
4620   __ cmp(Rtemp, JVM_CONSTANT_Class);
4621 
4622 #ifndef AARCH64
4623   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4624 #endif // !AARCH64
4625 
4626   __ b(quicked, eq);
4627 
4628   __ push(atos);
4629   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4630   // vm_result_2 has metadata result
4631   __ get_vm_result_2(Rsuper, Robj);
4632   __ pop_ptr(Robj);
4633   __ b(resolved);
4634 
4635   __ bind(throw_exception);
4636   // Come here on failure of subtype check
4637   __ profile_typecheck_failed(R1_tmp);
4638   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4639   __ b(Interpreter::_throw_ClassCastException_entry);
4640 
4641   // Get superklass in Rsuper and subklass in Rsub
4642   __ bind(quicked);
4643   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4644 
4645   __ bind(resolved);
4646   __ load_klass(Rsub, Robj);
4647 
4648   // Generate subtype check. Blows both tmps and Rtemp.
4649   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4650   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4651 
4652   // Come here on success
4653 
4654   // Collect counts on whether this check-cast sees NULLs a lot or not.
4655   if (ProfileInterpreter) {
4656     __ b(done);
4657     __ bind(is_null);
4658     __ profile_null_seen(R1_tmp);
4659   } else {
4660     __ bind(is_null);   // same as 'done'
4661   }
4662   __ bind(done);
4663 }
4664 
4665 
4666 void TemplateTable::instanceof() {
4667   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4668   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4669 
4670   transition(atos, itos);
4671   Label done, is_null, not_subtype, quicked, resolved;
4672 
4673   const Register Robj = R0_tos;
4674   const Register Rcpool = R2_tmp;
4675   const Register Rtags = R3_tmp;
4676   const Register Rindex = R4_tmp;
4677   const Register Rsuper = R3_tmp;
4678   const Register Rsub   = R4_tmp;
4679   const Register Rsubtype_check_tmp1 = R0_tmp;
4680   const Register Rsubtype_check_tmp2 = R1_tmp;
4681 
4682   __ cbz(Robj, is_null);
4683 
4684   __ load_klass(Rsub, Robj);
4685 
4686   // Get cpool & tags index
4687   __ get_cpool_and_tags(Rcpool, Rtags);
4688   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4689 
4690   // See if bytecode has already been quicked
4691   __ add(Rtemp, Rtags, Rindex);
4692 #ifdef AARCH64
4693   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4694   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4695   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4696 #else
4697   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4698 #endif // AARCH64
4699   __ cmp(Rtemp, JVM_CONSTANT_Class);
4700 
4701 #ifndef AARCH64
4702   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4703 #endif // !AARCH64
4704 
4705   __ b(quicked, eq);
4706 
4707   __ push(atos);
4708   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4709   // vm_result_2 has metadata result
4710   __ get_vm_result_2(Rsuper, Robj);
4711   __ pop_ptr(Robj);
4712   __ b(resolved);
4713 
4714   // Get superklass in Rsuper and subklass in Rsub
4715   __ bind(quicked);
4716   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4717 
4718   __ bind(resolved);
4719   __ load_klass(Rsub, Robj);
4720 
4721   // Generate subtype check. Blows both tmps and Rtemp.
4722   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4723 
4724   // Come here on success
4725   __ mov(R0_tos, 1);
4726   __ b(done);
4727 
4728   __ bind(not_subtype);
4729   // Come here on failure
4730   __ profile_typecheck_failed(R1_tmp);
4731   __ mov(R0_tos, 0);
4732 
4733   // Collect counts on whether this test sees NULLs a lot or not.
4734   if (ProfileInterpreter) {
4735     __ b(done);
4736     __ bind(is_null);
4737     __ profile_null_seen(R1_tmp);
4738   } else {
4739     __ bind(is_null);   // same as 'done'
4740   }
4741   __ bind(done);
4742 }
4743 
4744 
4745 //----------------------------------------------------------------------------------------------------
4746 // Breakpoints
4747 void TemplateTable::_breakpoint() {
4748 
4749   // Note: We get here even if we are single stepping.
4750   // jbug insists on setting breakpoints at every bytecode
4751   // even if we are in single step mode.
4752 
4753   transition(vtos, vtos);
4754 
4755   // get the unpatched byte code
4756   __ mov(R1, Rmethod);
4757   __ mov(R2, Rbcp);
4758   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4759 #ifdef AARCH64
4760   __ sxtw(Rtmp_save0, R0);
4761 #else
4762   __ mov(Rtmp_save0, R0);
4763 #endif // AARCH64
4764 
4765   // post the breakpoint event
4766   __ mov(R1, Rmethod);
4767   __ mov(R2, Rbcp);
4768   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4769 
4770   // complete the execution of original bytecode
4771   __ mov(R3_bytecode, Rtmp_save0);
4772   __ dispatch_only_normal(vtos);
4773 }
4774 
4775 
4776 //----------------------------------------------------------------------------------------------------
4777 // Exceptions
4778 
4779 void TemplateTable::athrow() {
4780   transition(atos, vtos);
4781   __ mov(Rexception_obj, R0_tos);
4782   __ null_check(Rexception_obj, Rtemp);
4783   __ b(Interpreter::throw_exception_entry());
4784 }
4785 
4786 
4787 //----------------------------------------------------------------------------------------------------
4788 // Synchronization
4789 //
4790 // Note: monitorenter & exit are symmetric routines, which is reflected
4791 //       in the assembly code structure as well
4792 //
4793 // Stack layout:
4794 //
4795 // [expressions  ] <--- Rstack_top        = expression stack top
4796 // ..
4797 // [expressions  ]
4798 // [monitor entry] <--- monitor block top = expression stack bot
4799 // ..
4800 // [monitor entry]
4801 // [frame data   ] <--- monitor block bot
4802 // ...
4803 // [saved FP     ] <--- FP
4804 
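     // A sketch of the monitorenter slot search below (illustration only, not
     // emitted code; monitor_block_top/bot and allocate_new_monitor_entry are
     // placeholder names):
     //
     //   BasicObjectLock* entry = NULL;
     //   for (BasicObjectLock* cur = monitor_block_top; cur != monitor_block_bot; cur++) {
     //     if (cur->obj() == NULL) entry = cur;    // remember a free slot
     //     if (cur->obj() == obj)  break;          // stop at an entry for the same object
     //   }
     //   if (entry == NULL) entry = allocate_new_monitor_entry();
     //   entry->set_obj(obj);
     //   lock_object(entry);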
4805 
4806 void TemplateTable::monitorenter() {
4807   transition(atos, vtos);
4808 
4809   const Register Robj = R0_tos;
4810   const Register Rentry = R1_tmp;
4811 
4812   // check for NULL object
4813   __ null_check(Robj, Rtemp);
4814 
4815   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4816   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4817   Label allocate_monitor, allocated;
4818 
4819   // initialize entry pointer
4820   __ mov(Rentry, 0);                             // points to free slot or NULL
4821 
4822   // find a free slot in the monitor block (result in Rentry)
4823   { Label loop, exit;
4824     const Register Rcur = R2_tmp;
4825     const Register Rcur_obj = Rtemp;
4826     const Register Rbottom = R3_tmp;
4827     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4828 
4829     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4830                                  // points to current entry, starting with top-most entry
4831     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4832                                  // points to word before bottom of monitor block
4833 
4834     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4835 #ifndef AARCH64
4836     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4837                                                  // prefetch monitor's object for the first iteration
4838 #endif // !AARCH64
4839     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4840 
4841     __ bind(loop);
4842 #ifdef AARCH64
4843     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4844 #endif // AARCH64
4845     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4846     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4847 
4848     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4849     __ b(exit, eq);                              // if same object then stop searching
4850 
4851     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4852 
4853     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4854 #ifndef AARCH64
4855     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4856                                                  // prefetch monitor's object for the next iteration
4857 #endif // !AARCH64
4858     __ b(loop, ne);                              // if not at bottom then check this entry
4859     __ bind(exit);
4860   }
4861 
4862   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4863 
4864   __ bind(allocate_monitor);
4865 
4866   // allocate one if there's no free slot
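       // Conceptually (illustration only, not emitted code): the expression stack
       // is shifted down by one monitor entry, and the freed words at the old
       // expression stack bottom become the new monitor entry:
       //
       //   new_stack_top   = old_stack_top   - entry_size;
       //   new_monitor_top = old_monitor_top - entry_size;   // becomes Rentry
       //   memmove(new_stack_top, old_stack_top, old_monitor_top - old_stack_top);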
4867   { Label loop;
4868     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4869 
4870     // 1. compute new pointers
4871 
4872 #ifdef AARCH64
4873     __ check_extended_sp(Rtemp);
4874     __ sub(SP, SP, entry_size);                  // adjust extended SP
4875     __ mov(Rtemp, SP);
4876     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4877 #endif // AARCH64
4878 
4879     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4880                                                  // old monitor block top / expression stack bottom
4881 
4882     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4883     __ check_stack_top_on_expansion();
4884 
4885     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4886 
4887     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4888 
4889     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4890                                                  // set new monitor block top
4891 
4892     // 2. move expression stack contents
4893 
4894     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4895 #ifndef AARCH64
4896     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4897 #endif // !AARCH64
4898     __ b(allocated, eq);
4899 
4900     __ bind(loop);
4901 #ifdef AARCH64
4902     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4903 #endif // AARCH64
4904     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4905                                                             // and advance to next word
4906     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4907 #ifndef AARCH64
4908     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4909 #endif // !AARCH64
4910     __ b(loop, ne);                                         // if not at bottom then copy next word
4911   }
4912 
4913   // call run-time routine
4914 
4915   // Rentry: points to monitor entry
4916   __ bind(allocated);
4917 
4918   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4919   // The object has already been popped from the stack, so the expression stack looks correct.
4920   __ add(Rbcp, Rbcp, 1);
4921 
4922   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4923   __ lock_object(Rentry);
4924 
4925   // check to make sure this monitor doesn't cause stack overflow after locking
4926   __ save_bcp();  // in case of exception
4927   __ arm_stack_overflow_check(0, Rtemp);
4928 
4929   // The bcp has already been incremented. Just need to dispatch to next instruction.
4930   __ dispatch_next(vtos);
4931 }
4932 
4933 
4934 void TemplateTable::monitorexit() {
4935   transition(atos, vtos);
4936 
4937   const Register Robj = R0_tos;
4938   const Register Rcur = R1_tmp;
4939   const Register Rbottom = R2_tmp;
4940   const Register Rcur_obj = Rtemp;
4941 
4942   // check for NULL object
4943   __ null_check(Robj, Rtemp);
4944 
4945   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4946   Label found, throw_exception;
4947 
4948   // find matching slot
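       // In outline (illustration only, not emitted code; monitor_block_top/bot
       // are placeholder names):
       //
       //   for (BasicObjectLock* cur = monitor_block_top; cur != monitor_block_bot; cur++) {
       //     if (cur->obj() == obj) { unlock_object(cur); return; }
       //   }
       //   InterpreterRuntime::throw_illegal_monitor_state_exception();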
4949   { Label loop;
4950     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4951 
4952     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4953                                  // points to current entry, starting with top-most entry
4954     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4955                                  // points to word before bottom of monitor block
4956 
4957     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4958 #ifndef AARCH64
4959     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4960                                                  // prefetch monitor's object for the first iteration
4961 #endif // !AARCH64
4962     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4963 
4964     __ bind(loop);
4965 #ifdef AARCH64
4966     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4967 #endif // AARCH64
4968     // check if current entry is for same object
4969     __ cmp(Rcur_obj, Robj);
4970     __ b(found, eq);                             // if same object then stop searching
4971     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4972     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4973 #ifndef AARCH64
4974     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4975 #endif // !AARCH64
4976     __ b (loop, ne);                             // if not at bottom then check this entry
4977   }
4978 
4979   // error handling. Unlocking was not block-structured
4980   __ bind(throw_exception);
4981   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4982   __ should_not_reach_here();
4983 
4984   // call run-time routine
4985   // Rcur: points to monitor entry
4986   __ bind(found);
4987   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4988   __ unlock_object(Rcur);
4989   __ pop_ptr(Robj);                              // discard object
4990 }
4991 
4992 
4993 //----------------------------------------------------------------------------------------------------
4994 // Wide instructions
4995 
4996 void TemplateTable::wide() {
4997   transition(vtos, vtos);
4998   __ ldrb(R3_bytecode, at_bcp(1));
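       // In effect (illustration only, not emitted code): dispatch through the
       // wide entry point table, indexed by the sub-bytecode that follows the
       // 'wide' prefix:
       //
       //   address target = Interpreter::_wentry_point[*(Rbcp + 1)];
       //   // ...and jump to target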
4999 
5000   InlinedAddress Ltable((address)Interpreter::_wentry_point);
5001   __ ldr_literal(Rtemp, Ltable);
5002   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
5003 
5004   __ nop(); // to avoid filling CPU pipeline with invalid instructions
5005   __ nop();
5006   __ bind_literal(Ltable);
5007 }
5008 
5009 
5010 //----------------------------------------------------------------------------------------------------
5011 // Multi arrays
5012 
5013 void TemplateTable::multianewarray() {
5014   transition(vtos, atos);
5015   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
5016 
5017   // last dim is on top of stack; we want address of first one:
5018   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
5019   // the subtracted wordSize makes first_addr point at the first dimension word.
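       // Worked example (hypothetical values): with ndims == 3 and
       // stackElementSize == wordSize, the dimension words occupy
       // [Rstack_top, Rstack_top + 3*wordSize), so
       //   first_addr = Rstack_top + 3*wordSize - wordSize = Rstack_top + 2*wordSize,
       // which is the address of the first (outermost) dimension.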
5020   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5021   __ sub(R1, Rtemp, wordSize);
5022 
5023   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
5024   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5025   // A MacroAssembler::StoreStore barrier is not needed here; it is already part of the runtime exit path
5026 }