1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "interpreter/interp_masm.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/templateTable.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/cpCache.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 
  42 #define __ _masm->
  43 
  44 //----------------------------------------------------------------------------------------------------
  45 // Platform-dependent initialization
  46 
  47 void TemplateTable::pd_initialize() {
  48   // No arm specific initialization
  49 }
  50 
  51 //----------------------------------------------------------------------------------------------------
  52 // Address computation
  53 
  54 // local variables
  55 static inline Address iaddress(int n)            {
  56   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  57 }
  58 
  59 static inline Address laddress(int n)            { return iaddress(n + 1); }
  60 #ifndef AARCH64
  61 static inline Address haddress(int n)            { return iaddress(n + 0); }
  62 #endif // !AARCH64
  63 
  64 static inline Address faddress(int n)            { return iaddress(n); }
  65 static inline Address daddress(int n)            { return laddress(n); }
  66 static inline Address aaddress(int n)            { return iaddress(n); }
  67 
  68 
  69 void TemplateTable::get_local_base_addr(Register r, Register index) {
  70   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  71 }
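     // Locals are addressed relative to Rlocals, which points at slot 0; higher
     // slot numbers live at decreasing addresses, which is why
     // get_local_base_addr() subtracts the scaled index. For a category-2 local
     // occupying slots n and n+1, laddress(n) == iaddress(n+1) is the word
     // holding the low half on 32-bit ARM, and haddress(n) the word holding the
     // high half, matching load_category2_local()/store_category2_local() below.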
  72 
  73 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  74 #ifdef AARCH64
  75   get_local_base_addr(scratch, index);
  76   return Address(scratch);
  77 #else
  78   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  79 #endif // AARCH64
  80 }
  81 
  82 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  83   return load_iaddress(index, scratch);
  84 }
  85 
  86 Address TemplateTable::load_faddress(Register index, Register scratch) {
  87 #ifdef __SOFTFP__
  88   return load_iaddress(index, scratch);
  89 #else
  90   get_local_base_addr(scratch, index);
  91   return Address(scratch);
  92 #endif // __SOFTFP__
  93 }
  94 
  95 Address TemplateTable::load_daddress(Register index, Register scratch) {
  96   get_local_base_addr(scratch, index);
  97   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  98 }
  99 
 100 // Address of the top of the Java expression stack, which may be different
 101 // from SP (it isn't for category 1 values).
 102 static inline Address at_tos() {
 103   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 104 }
 105 
 106 static inline Address at_tos_p1() {
 107   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 108 }
 109 
 110 static inline Address at_tos_p2() {
 111   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 112 }
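     // Rstack_top points at the most recently pushed expression stack word;
     // at_tos_p1()/at_tos_p2() address the next two deeper elements. aastore
     // below, for example, reads value, index and array through at_tos(),
     // at_tos_p1() and at_tos_p2() respectively.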
 113 
 114 
 115 // 32-bit ARM:
 116 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 117 // separate ldr instructions (supports nonadjacent values).
 118 // Used for longs in all modes, and for doubles in SOFTFP mode.
 119 //
 120 // AArch64: loads long local into R0_tos.
 121 //
 122 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 123   const Register Rlocal_base = tmp;
 124   assert_different_registers(Rlocal_index, tmp);
 125 
 126   get_local_base_addr(Rlocal_base, Rlocal_index);
 127 #ifdef AARCH64
 128   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 129 #else
 130   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 131   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 132 #endif // AARCH64
 133 }
 134 
 135 
 136 // 32-bit ARM:
 137 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 138 // separate str instructions (supports nonadjacent values).
 139 // Used for longs in all modes, and for doubles in SOFTFP mode
 140 //
 141 // AArch64: stores R0_tos to long local.
 142 //
 143 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 144   const Register Rlocal_base = tmp;
 145   assert_different_registers(Rlocal_index, tmp);
 146 
 147   get_local_base_addr(Rlocal_base, Rlocal_index);
 148 #ifdef AARCH64
 149   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 150 #else
 151   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 152   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 153 #endif // AARCH64
 154 }
 155 
 156 // Returns address of Java array element using temp register as address base.
 157 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 158   int logElemSize = exact_log2(type2aelembytes(elemType));
 159   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 160   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 161 }
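     // The effective element address is
     //   array + arrayOopDesc::base_offset_in_bytes(elemType) + (index << logElemSize);
     // temp receives array plus the scaled index, and the returned Address adds
     // the array header size as a constant displacement.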
 162 
 163 //----------------------------------------------------------------------------------------------------
 164 // Condition conversion
 165 AsmCondition convNegCond(TemplateTable::Condition cc) {
 166   switch (cc) {
 167     case TemplateTable::equal        : return ne;
 168     case TemplateTable::not_equal    : return eq;
 169     case TemplateTable::less         : return ge;
 170     case TemplateTable::less_equal   : return gt;
 171     case TemplateTable::greater      : return le;
 172     case TemplateTable::greater_equal: return lt;
 173   }
 174   ShouldNotReachHere();
 175   return nv;
 176 }
 177 
 178 //----------------------------------------------------------------------------------------------------
 179 // Miscellaneous helper routines
 180 
 181 // Store an oop (or NULL) at the address described by obj.
 182 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 183 // Also destroys new_val and obj.base().
 184 static void do_oop_store(InterpreterMacroAssembler* _masm,
 185                          Address obj,
 186                          Register new_val,
 187                          Register tmp1,
 188                          Register tmp2,
 189                          Register tmp3,
 190                          BarrierSet::Name barrier,
 191                          bool precise,
 192                          bool is_null) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   switch (barrier) {
 196 #if INCLUDE_ALL_GCS
 197     case BarrierSet::G1BarrierSet:
 198       {
 199         // flatten object address if needed
 200         assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
 201 
 202         const Register store_addr = obj.base();
 203         if (obj.index() != noreg) {
 204           assert (obj.disp() == 0, "index or displacement, not both");
 205 #ifdef AARCH64
 206           __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
 207 #else
 208           assert(obj.offset_op() == add_offset, "addition is expected");
 209           __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
 210 #endif // AARCH64
 211         } else if (obj.disp() != 0) {
 212           __ add(store_addr, obj.base(), obj.disp());
 213         }
 214 
 215         __ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3);
 216         if (is_null) {
 217           __ store_heap_oop_null(new_val, Address(store_addr));
 218         } else {
 219           // G1 barrier needs uncompressed oop for region cross check.
 220           Register val_to_store = new_val;
 221           if (UseCompressedOops) {
 222             val_to_store = tmp1;
 223             __ mov(val_to_store, new_val);
 224           }
 225           __ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store:
 226           val_to_store = noreg;
 227           __ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3);
 228         }
 229       }
 230       break;
 231 #endif // INCLUDE_ALL_GCS
 232     case BarrierSet::CardTableBarrierSet:
 233       {
 234         if (is_null) {
 235           __ store_heap_oop_null(new_val, obj);
 236         } else {
 237           assert (!precise || (obj.index() == noreg && obj.disp() == 0),
 238                   "store check address should be calculated beforehand");
 239 
 240           __ store_check_part1(tmp1);
 241           __ store_heap_oop(new_val, obj); // blows new_val:
 242           new_val = noreg;
 243           __ store_check_part2(obj.base(), tmp1, tmp2);
 244         }
 245       }
 246       break;
 247     case BarrierSet::ModRef:
 248       ShouldNotReachHere();
 249       break;
 250     default:
 251       ShouldNotReachHere();
 252       break;
 253   }
 254 }
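     // Summary of the paths above: for G1 the destination address is flattened
     // into a single register, the SATB pre-barrier records the previous value,
     // the oop is stored, and for non-null stores the post-barrier enqueues the
     // card for remembered-set processing. For the card-table barrier the store
     // check dirties the card covering obj.base() after the store (hence the
     // assert that a precise store has a pre-computed address). NULL stores
     // never need a post-barrier or card mark.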
 255 
 256 Address TemplateTable::at_bcp(int offset) {
 257   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 258   return Address(Rbcp, offset);
 259 }
 260 
 261 
 262 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 263 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 264                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 265                                    int byte_no) {
 266   assert_different_registers(bc_reg, temp_reg);
 267   if (!RewriteBytecodes)  return;
 268   Label L_patch_done;
 269 
 270   switch (bc) {
 271   case Bytecodes::_fast_aputfield:
 272   case Bytecodes::_fast_bputfield:
 273   case Bytecodes::_fast_zputfield:
 274   case Bytecodes::_fast_cputfield:
 275   case Bytecodes::_fast_dputfield:
 276   case Bytecodes::_fast_fputfield:
 277   case Bytecodes::_fast_iputfield:
 278   case Bytecodes::_fast_lputfield:
 279   case Bytecodes::_fast_sputfield:
 280     {
 281       // We skip bytecode quickening for putfield instructions when
 282       // the put_code written to the constant pool cache is zero.
 283       // This is required so that every execution of this instruction
 284       // calls out to InterpreterRuntime::resolve_get_put to do
 285       // additional, required work.
 286       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 287       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 288       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 289       __ mov(bc_reg, bc);
 290       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 291     }
 292     break;
 293   default:
 294     assert(byte_no == -1, "sanity");
 295     // the pair bytecodes have already done the load.
 296     if (load_bc_into_bc_reg) {
 297       __ mov(bc_reg, bc);
 298     }
 299   }
 300 
 301   if (__ can_post_breakpoint()) {
 302     Label L_fast_patch;
 303     // if a breakpoint is present we can't rewrite the stream directly
 304     __ ldrb(temp_reg, at_bcp(0));
 305     __ cmp(temp_reg, Bytecodes::_breakpoint);
 306     __ b(L_fast_patch, ne);
 307     if (bc_reg != R3) {
 308       __ mov(R3, bc_reg);
 309     }
 310     __ mov(R1, Rmethod);
 311     __ mov(R2, Rbcp);
 312     // Let breakpoint table handling rewrite to quicker bytecode
 313     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 314     __ b(L_patch_done);
 315     __ bind(L_fast_patch);
 316   }
 317 
 318 #ifdef ASSERT
 319   Label L_okay;
 320   __ ldrb(temp_reg, at_bcp(0));
 321   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 322   __ b(L_okay, eq);
 323   __ cmp(temp_reg, bc_reg);
 324   __ b(L_okay, eq);
 325   __ stop("patching the wrong bytecode");
 326   __ bind(L_okay);
 327 #endif
 328 
 329   // patch bytecode
 330   __ strb(bc_reg, at_bcp(0));
 331   __ bind(L_patch_done);
 332 }
 333 
 334 //----------------------------------------------------------------------------------------------------
 335 // Individual instructions
 336 
 337 void TemplateTable::nop() {
 338   transition(vtos, vtos);
 339   // nothing to do
 340 }
 341 
 342 void TemplateTable::shouldnotreachhere() {
 343   transition(vtos, vtos);
 344   __ stop("shouldnotreachhere bytecode");
 345 }
 346 
 347 
 348 
 349 void TemplateTable::aconst_null() {
 350   transition(vtos, atos);
 351   __ mov(R0_tos, 0);
 352 }
 353 
 354 
 355 void TemplateTable::iconst(int value) {
 356   transition(vtos, itos);
 357   __ mov_slow(R0_tos, value);
 358 }
 359 
 360 
 361 void TemplateTable::lconst(int value) {
 362   transition(vtos, ltos);
 363   assert((value == 0) || (value == 1), "unexpected long constant");
 364   __ mov(R0_tos, value);
 365 #ifndef AARCH64
 366   __ mov(R1_tos_hi, 0);
 367 #endif // !AARCH64
 368 }
 369 
 370 
 371 void TemplateTable::fconst(int value) {
 372   transition(vtos, ftos);
 373 #ifdef AARCH64
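       // The second operand of fmov_s below is a raw 8-bit floating-point
       // immediate (imm8) in the A64 FMOV (immediate) encoding: 0x70 expands to
       // 1.0f and 0x00 to 2.0f. Zero cannot be encoded as an imm8, so 0.0f is
       // produced by moving ZR into the register instead.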
 374   switch(value) {
 375   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 376   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 377   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 378   default:  ShouldNotReachHere();      break;
 379   }
 380 #else
 381   const int zero = 0;         // 0.0f
 382   const int one = 0x3f800000; // 1.0f
 383   const int two = 0x40000000; // 2.0f
 384 
 385   switch(value) {
 386   case 0:   __ mov(R0_tos, zero);   break;
 387   case 1:   __ mov(R0_tos, one);    break;
 388   case 2:   __ mov(R0_tos, two);    break;
 389   default:  ShouldNotReachHere();   break;
 390   }
 391 
 392 #ifndef __SOFTFP__
 393   __ fmsr(S0_tos, R0_tos);
 394 #endif // !__SOFTFP__
 395 #endif // AARCH64
 396 }
 397 
 398 
 399 void TemplateTable::dconst(int value) {
 400   transition(vtos, dtos);
 401 #ifdef AARCH64
 402   switch(value) {
 403   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 404   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 405   default:  ShouldNotReachHere();      break;
 406   }
 407 #else
 408   const int one_lo = 0;            // low part of 1.0
 409   const int one_hi = 0x3ff00000;   // high part of 1.0
 410 
 411   if (value == 0) {
 412 #ifdef __SOFTFP__
 413     __ mov(R0_tos_lo, 0);
 414     __ mov(R1_tos_hi, 0);
 415 #else
 416     __ mov(R0_tmp, 0);
 417     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 418 #endif // __SOFTFP__
 419   } else if (value == 1) {
 420     __ mov(R0_tos_lo, one_lo);
 421     __ mov_slow(R1_tos_hi, one_hi);
 422 #ifndef __SOFTFP__
 423     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 424 #endif // !__SOFTFP__
 425   } else {
 426     ShouldNotReachHere();
 427   }
 428 #endif // AARCH64
 429 }
 430 
 431 
 432 void TemplateTable::bipush() {
 433   transition(vtos, itos);
 434   __ ldrsb(R0_tos, at_bcp(1));
 435 }
 436 
 437 
 438 void TemplateTable::sipush() {
 439   transition(vtos, itos);
 440   __ ldrsb(R0_tmp, at_bcp(1));
 441   __ ldrb(R1_tmp, at_bcp(2));
 442   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 443 }
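     // sipush builds its operand as a big-endian 16-bit value: the first byte is
     // loaded sign-extended, shifted left by BitsPerByte and or-ed with the
     // zero-extended second byte, i.e. R0_tos = (short)((bcp[1] << 8) | bcp[2]).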
 444 
 445 
 446 void TemplateTable::ldc(bool wide) {
 447   transition(vtos, vtos);
 448   Label fastCase, Done;
 449 
 450   const Register Rindex = R1_tmp;
 451   const Register Rcpool = R2_tmp;
 452   const Register Rtags  = R3_tmp;
 453   const Register RtagType = R3_tmp;
 454 
 455   if (wide) {
 456     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 457   } else {
 458     __ ldrb(Rindex, at_bcp(1));
 459   }
 460   __ get_cpool_and_tags(Rcpool, Rtags);
 461 
 462   const int base_offset = ConstantPool::header_size() * wordSize;
 463   const int tags_offset = Array<u1>::base_offset_in_bytes();
 464 
 465   // get const type
 466   __ add(Rtemp, Rtags, tags_offset);
 467 #ifdef AARCH64
 468   __ add(Rtemp, Rtemp, Rindex);
 469   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64: figure out whether a barrier is needed here or a control dependency is enough
 470 #else
 471   __ ldrb(RtagType, Address(Rtemp, Rindex));
 472   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 473 #endif // AARCH64
 474 
 475   // unresolved class - get the resolved class
 476   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 477 
 478   // unresolved class in error (resolution failed) - call into the runtime
 479   // so that the same error from the first resolution attempt is thrown.
 480 #ifdef AARCH64
 481   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 482   __ cond_cmp(RtagType, Rtemp, ne);
 483 #else
 484   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 485 #endif // AARCH64
 486 
 487   // resolved class - need to call vm to get java mirror of the class
 488   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 489 
 490   __ b(fastCase, ne);
 491 
 492   // slow case - call runtime
 493   __ mov(R1, wide);
 494   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 495   __ push(atos);
 496   __ b(Done);
 497 
 498   // int, float
 499   __ bind(fastCase);
 500 #ifdef ASSERT
 501   { Label L;
 502     __ cmp(RtagType, JVM_CONSTANT_Integer);
 503     __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 504     __ b(L, eq);
 505     __ stop("unexpected tag type in ldc");
 506     __ bind(L);
 507   }
 508 #endif // ASSERT
 509   // itos, ftos
 510   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 511   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 512 
 513   // floats and ints are placed on the stack in the same way, so
 514   // we can use push(itos) to transfer a float value without VFP
 515   __ push(itos);
 516   __ bind(Done);
 517 }
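     // ldc in summary: class-like tags (resolved, unresolved, or resolution
     // error) take the slow path into InterpreterRuntime::ldc, which returns the
     // class mirror in R0_tos (or rethrows the original resolution error), and
     // the result is pushed as atos; int and float entries are read directly
     // from the constant pool slot and pushed through the itos path, since both
     // occupy a single stack word.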
 518 
 519 // Fast path for caching oop constants.
 520 void TemplateTable::fast_aldc(bool wide) {
 521   transition(vtos, atos);
 522   int index_size = wide ? sizeof(u2) : sizeof(u1);
 523   Label resolved;
 524 
 525   // We are resolved if the resolved reference cache entry contains a
 526   // non-null object (CallSite, etc.)
 527   assert_different_registers(R0_tos, R2_tmp);
 528   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 529   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 530   __ cbnz(R0_tos, resolved);
 531 
 532   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 533 
 534   // first time invocation - must resolve first
 535   __ mov(R1, (int)bytecode());
 536   __ call_VM(R0_tos, entry, R1);
 537   __ bind(resolved);
 538 
 539   if (VerifyOops) {
 540     __ verify_oop(R0_tos);
 541   }
 542 }
 543 
 544 void TemplateTable::ldc2_w() {
 545   transition(vtos, vtos);
 546   const Register Rtags  = R2_tmp;
 547   const Register Rindex = R3_tmp;
 548   const Register Rcpool = R4_tmp;
 549   const Register Rbase  = R5_tmp;
 550 
 551   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 552 
 553   __ get_cpool_and_tags(Rcpool, Rtags);
 554   const int base_offset = ConstantPool::header_size() * wordSize;
 555   const int tags_offset = Array<u1>::base_offset_in_bytes();
 556 
 557   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 558 
 559 #ifdef __ABI_HARD__
 560   Label Long, exit;
 561   // get type from tags
 562   __ add(Rtemp, Rtags, tags_offset);
 563   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 564   __ cmp(Rtemp, JVM_CONSTANT_Double);
 565   __ b(Long, ne);
 566   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 567 
 568   __ push(dtos);
 569   __ b(exit);
 570   __ bind(Long);
 571 #endif
 572 
 573 #ifdef AARCH64
 574   __ ldr(R0_tos, Address(Rbase, base_offset));
 575 #else
 576   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 577   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 578 #endif // AARCH64
 579   __ push(ltos);
 580 
 581 #ifdef __ABI_HARD__
 582   __ bind(exit);
 583 #endif
 584 }
 585 
 586 
 587 void TemplateTable::locals_index(Register reg, int offset) {
 588   __ ldrb(reg, at_bcp(offset));
 589 }
 590 
 591 void TemplateTable::iload() {
 592   iload_internal();
 593 }
 594 
 595 void TemplateTable::nofast_iload() {
 596   iload_internal(may_not_rewrite);
 597 }
 598 
 599 void TemplateTable::iload_internal(RewriteControl rc) {
 600   transition(vtos, itos);
 601 
 602   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 603     Label rewrite, done;
 604     const Register next_bytecode = R1_tmp;
 605     const Register target_bytecode = R2_tmp;
 606 
 607     // get next byte
 608     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 609     // if _iload, wait to rewrite to _fast_iload2.  We only want to rewrite
 610     // the last two iloads in a pair.  Matching _fast_iload (rather than
 611     // _iload) means the following bytecode has already been rewritten, so
 612     // the current iload and the next one form an iload pair.
 613     __ cmp(next_bytecode, Bytecodes::_iload);
 614     __ b(done, eq);
 615 
 616     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 617     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 618     __ b(rewrite, eq);
 619 
 620     // if _caload, rewrite to fast_icaload
 621     __ cmp(next_bytecode, Bytecodes::_caload);
 622     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 623     __ b(rewrite, eq);
 624 
 625     // rewrite so iload doesn't check again.
 626     __ mov(target_bytecode, Bytecodes::_fast_iload);
 627 
 628     // rewrite
 629     // R2: fast bytecode
 630     __ bind(rewrite);
 631     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 632     __ bind(done);
 633   }
 634 
 635   // Get the local value into tos
 636   const Register Rlocal_index = R1_tmp;
 637   locals_index(Rlocal_index);
 638   Address local = load_iaddress(Rlocal_index, Rtemp);
 639   __ ldr_s32(R0_tos, local);
 640 }
 641 
 642 
 643 void TemplateTable::fast_iload2() {
 644   transition(vtos, itos);
 645   const Register Rlocal_index = R1_tmp;
 646 
 647   locals_index(Rlocal_index);
 648   Address local = load_iaddress(Rlocal_index, Rtemp);
 649   __ ldr_s32(R0_tos, local);
 650   __ push(itos);
 651 
 652   locals_index(Rlocal_index, 3);
 653   local = load_iaddress(Rlocal_index, Rtemp);
 654   __ ldr_s32(R0_tos, local);
 655 }
 656 
 657 void TemplateTable::fast_iload() {
 658   transition(vtos, itos);
 659   const Register Rlocal_index = R1_tmp;
 660 
 661   locals_index(Rlocal_index);
 662   Address local = load_iaddress(Rlocal_index, Rtemp);
 663   __ ldr_s32(R0_tos, local);
 664 }
 665 
 666 
 667 void TemplateTable::lload() {
 668   transition(vtos, ltos);
 669   const Register Rlocal_index = R2_tmp;
 670 
 671   locals_index(Rlocal_index);
 672   load_category2_local(Rlocal_index, R3_tmp);
 673 }
 674 
 675 
 676 void TemplateTable::fload() {
 677   transition(vtos, ftos);
 678   const Register Rlocal_index = R2_tmp;
 679 
 680   // Get the local value into tos
 681   locals_index(Rlocal_index);
 682   Address local = load_faddress(Rlocal_index, Rtemp);
 683 #ifdef __SOFTFP__
 684   __ ldr(R0_tos, local);
 685 #else
 686   __ ldr_float(S0_tos, local);
 687 #endif // __SOFTFP__
 688 }
 689 
 690 
 691 void TemplateTable::dload() {
 692   transition(vtos, dtos);
 693   const Register Rlocal_index = R2_tmp;
 694 
 695   locals_index(Rlocal_index);
 696 
 697 #ifdef __SOFTFP__
 698   load_category2_local(Rlocal_index, R3_tmp);
 699 #else
 700   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 701 #endif // __SOFTFP__
 702 }
 703 
 704 
 705 void TemplateTable::aload() {
 706   transition(vtos, atos);
 707   const Register Rlocal_index = R1_tmp;
 708 
 709   locals_index(Rlocal_index);
 710   Address local = load_aaddress(Rlocal_index, Rtemp);
 711   __ ldr(R0_tos, local);
 712 }
 713 
 714 
 715 void TemplateTable::locals_index_wide(Register reg) {
 716   assert_different_registers(reg, Rtemp);
 717   __ ldrb(Rtemp, at_bcp(2));
 718   __ ldrb(reg, at_bcp(3));
 719   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 720 }
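     // The wide index is a big-endian u2 operand: reg = (bcp[2] << 8) | bcp[3].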
 721 
 722 
 723 void TemplateTable::wide_iload() {
 724   transition(vtos, itos);
 725   const Register Rlocal_index = R2_tmp;
 726 
 727   locals_index_wide(Rlocal_index);
 728   Address local = load_iaddress(Rlocal_index, Rtemp);
 729   __ ldr_s32(R0_tos, local);
 730 }
 731 
 732 
 733 void TemplateTable::wide_lload() {
 734   transition(vtos, ltos);
 735   const Register Rlocal_index = R2_tmp;
 736   const Register Rlocal_base = R3_tmp;
 737 
 738   locals_index_wide(Rlocal_index);
 739   load_category2_local(Rlocal_index, R3_tmp);
 740 }
 741 
 742 
 743 void TemplateTable::wide_fload() {
 744   transition(vtos, ftos);
 745   const Register Rlocal_index = R2_tmp;
 746 
 747   locals_index_wide(Rlocal_index);
 748   Address local = load_faddress(Rlocal_index, Rtemp);
 749 #ifdef __SOFTFP__
 750   __ ldr(R0_tos, local);
 751 #else
 752   __ ldr_float(S0_tos, local);
 753 #endif // __SOFTFP__
 754 }
 755 
 756 
 757 void TemplateTable::wide_dload() {
 758   transition(vtos, dtos);
 759   const Register Rlocal_index = R2_tmp;
 760 
 761   locals_index_wide(Rlocal_index);
 762 #ifdef __SOFTFP__
 763   load_category2_local(Rlocal_index, R3_tmp);
 764 #else
 765   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 766 #endif // __SOFTFP__
 767 }
 768 
 769 
 770 void TemplateTable::wide_aload() {
 771   transition(vtos, atos);
 772   const Register Rlocal_index = R2_tmp;
 773 
 774   locals_index_wide(Rlocal_index);
 775   Address local = load_aaddress(Rlocal_index, Rtemp);
 776   __ ldr(R0_tos, local);
 777 }
 778 
 779 void TemplateTable::index_check(Register array, Register index) {
 780   // Pop ptr into array
 781   __ pop_ptr(array);
 782   index_check_without_pop(array, index);
 783 }
 784 
 785 void TemplateTable::index_check_without_pop(Register array, Register index) {
 786   assert_different_registers(array, index, Rtemp);
 787   // check array
 788   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 789   // check index
 790   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 791   __ cmp_32(index, Rtemp);
 792   if (index != R4_ArrayIndexOutOfBounds_index) {
 793     // convention with generate_ArrayIndexOutOfBounds_handler()
 794     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 795   }
 796   __ mov(R1, array, hs);
 797   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 798 }
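     // cmp_32 plus the 'hs' (unsigned >=) condition catches both index >= length
     // and negative indices, which compare as very large unsigned values. On the
     // out-of-bounds path the offending index is expected in R4 (see the
     // convention comment above) and, per the conditional mov, the array in R1,
     // presumably for constructing the exception message.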
 799 
 800 
 801 void TemplateTable::iaload() {
 802   transition(itos, itos);
 803   const Register Rarray = R1_tmp;
 804   const Register Rindex = R0_tos;
 805 
 806   index_check(Rarray, Rindex);
 807   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 808 }
 809 
 810 
 811 void TemplateTable::laload() {
 812   transition(itos, ltos);
 813   const Register Rarray = R1_tmp;
 814   const Register Rindex = R0_tos;
 815 
 816   index_check(Rarray, Rindex);
 817 
 818 #ifdef AARCH64
 819   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 820 #else
 821   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 822   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 823   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 824 #endif // AARCH64
 825 }
 826 
 827 
 828 void TemplateTable::faload() {
 829   transition(itos, ftos);
 830   const Register Rarray = R1_tmp;
 831   const Register Rindex = R0_tos;
 832 
 833   index_check(Rarray, Rindex);
 834 
 835   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 836 #ifdef __SOFTFP__
 837   __ ldr(R0_tos, addr);
 838 #else
 839   __ ldr_float(S0_tos, addr);
 840 #endif // __SOFTFP__
 841 }
 842 
 843 
 844 void TemplateTable::daload() {
 845   transition(itos, dtos);
 846   const Register Rarray = R1_tmp;
 847   const Register Rindex = R0_tos;
 848 
 849   index_check(Rarray, Rindex);
 850 
 851 #ifdef __SOFTFP__
 852   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 853   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 854   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 855 #else
 856   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 857 #endif // __SOFTFP__
 858 }
 859 
 860 
 861 void TemplateTable::aaload() {
 862   transition(itos, atos);
 863   const Register Rarray = R1_tmp;
 864   const Register Rindex = R0_tos;
 865 
 866   index_check(Rarray, Rindex);
 867   __ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp));
 868 }
 869 
 870 
 871 void TemplateTable::baload() {
 872   transition(itos, itos);
 873   const Register Rarray = R1_tmp;
 874   const Register Rindex = R0_tos;
 875 
 876   index_check(Rarray, Rindex);
 877   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 878 }
 879 
 880 
 881 void TemplateTable::caload() {
 882   transition(itos, itos);
 883   const Register Rarray = R1_tmp;
 884   const Register Rindex = R0_tos;
 885 
 886   index_check(Rarray, Rindex);
 887   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 888 }
 889 
 890 
 891 // iload followed by caload - a frequent pair
 892 void TemplateTable::fast_icaload() {
 893   transition(vtos, itos);
 894   const Register Rlocal_index = R1_tmp;
 895   const Register Rarray = R1_tmp;
 896   const Register Rindex = R4_tmp; // index_check prefers index in R4
 897   assert_different_registers(Rlocal_index, Rindex);
 898   assert_different_registers(Rarray, Rindex);
 899 
 900   // load index out of locals
 901   locals_index(Rlocal_index);
 902   Address local = load_iaddress(Rlocal_index, Rtemp);
 903   __ ldr_s32(Rindex, local);
 904 
 905   // get array element
 906   index_check(Rarray, Rindex);
 907   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 908 }
 909 
 910 
 911 void TemplateTable::saload() {
 912   transition(itos, itos);
 913   const Register Rarray = R1_tmp;
 914   const Register Rindex = R0_tos;
 915 
 916   index_check(Rarray, Rindex);
 917   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 918 }
 919 
 920 
 921 void TemplateTable::iload(int n) {
 922   transition(vtos, itos);
 923   __ ldr_s32(R0_tos, iaddress(n));
 924 }
 925 
 926 
 927 void TemplateTable::lload(int n) {
 928   transition(vtos, ltos);
 929 #ifdef AARCH64
 930   __ ldr(R0_tos, laddress(n));
 931 #else
 932   __ ldr(R0_tos_lo, laddress(n));
 933   __ ldr(R1_tos_hi, haddress(n));
 934 #endif // AARCH64
 935 }
 936 
 937 
 938 void TemplateTable::fload(int n) {
 939   transition(vtos, ftos);
 940 #ifdef __SOFTFP__
 941   __ ldr(R0_tos, faddress(n));
 942 #else
 943   __ ldr_float(S0_tos, faddress(n));
 944 #endif // __SOFTFP__
 945 }
 946 
 947 
 948 void TemplateTable::dload(int n) {
 949   transition(vtos, dtos);
 950 #ifdef __SOFTFP__
 951   __ ldr(R0_tos_lo, laddress(n));
 952   __ ldr(R1_tos_hi, haddress(n));
 953 #else
 954   __ ldr_double(D0_tos, daddress(n));
 955 #endif // __SOFTFP__
 956 }
 957 
 958 
 959 void TemplateTable::aload(int n) {
 960   transition(vtos, atos);
 961   __ ldr(R0_tos, aaddress(n));
 962 }
 963 
 964 void TemplateTable::aload_0() {
 965   aload_0_internal();
 966 }
 967 
 968 void TemplateTable::nofast_aload_0() {
 969   aload_0_internal(may_not_rewrite);
 970 }
 971 
 972 void TemplateTable::aload_0_internal(RewriteControl rc) {
 973   transition(vtos, atos);
 974   // According to bytecode histograms, the pairs:
 975   //
 976   // _aload_0, _fast_igetfield
 977   // _aload_0, _fast_agetfield
 978   // _aload_0, _fast_fgetfield
 979   //
 980   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 981   // bytecode checks if the next bytecode is either _fast_igetfield,
 982   // _fast_agetfield or _fast_fgetfield and then rewrites the
 983   // current bytecode into a pair bytecode; otherwise it rewrites the current
 984   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 985   //
 986   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 987   //       otherwise we may miss an opportunity for a pair.
 988   //
 989   // Also rewrite frequent pairs
 990   //   aload_0, aload_1
 991   //   aload_0, iload_1
 992   // These short bytecodes are the most profitable ones to rewrite.
 993   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 994     Label rewrite, done;
 995     const Register next_bytecode = R1_tmp;
 996     const Register target_bytecode = R2_tmp;
 997 
 998     // get next byte
 999     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1000 
1001     // if _getfield then delay the rewrite
1002     __ cmp(next_bytecode, Bytecodes::_getfield);
1003     __ b(done, eq);
1004 
1005     // if _igetfield then rewrite to _fast_iaccess_0
1006     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1007     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1008     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1009     __ b(rewrite, eq);
1010 
1011     // if _agetfield then rewrite to _fast_aaccess_0
1012     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1013     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1014     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1015     __ b(rewrite, eq);
1016 
1017     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1018     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1019     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1020 
1021     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1022 #ifdef AARCH64
1023     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1024     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1025     __ mov(target_bytecode, Rtemp, eq);
1026 #else
1027     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1028     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1029 #endif // AARCH64
1030 
1031     // rewrite
1032     __ bind(rewrite);
1033     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1034 
1035     __ bind(done);
1036   }
1037 
1038   aload(0);
1039 }
1040 
1041 void TemplateTable::istore() {
1042   transition(itos, vtos);
1043   const Register Rlocal_index = R2_tmp;
1044 
1045   locals_index(Rlocal_index);
1046   Address local = load_iaddress(Rlocal_index, Rtemp);
1047   __ str_32(R0_tos, local);
1048 }
1049 
1050 
1051 void TemplateTable::lstore() {
1052   transition(ltos, vtos);
1053   const Register Rlocal_index = R2_tmp;
1054 
1055   locals_index(Rlocal_index);
1056   store_category2_local(Rlocal_index, R3_tmp);
1057 }
1058 
1059 
1060 void TemplateTable::fstore() {
1061   transition(ftos, vtos);
1062   const Register Rlocal_index = R2_tmp;
1063 
1064   locals_index(Rlocal_index);
1065   Address local = load_faddress(Rlocal_index, Rtemp);
1066 #ifdef __SOFTFP__
1067   __ str(R0_tos, local);
1068 #else
1069   __ str_float(S0_tos, local);
1070 #endif // __SOFTFP__
1071 }
1072 
1073 
1074 void TemplateTable::dstore() {
1075   transition(dtos, vtos);
1076   const Register Rlocal_index = R2_tmp;
1077 
1078   locals_index(Rlocal_index);
1079 
1080 #ifdef __SOFTFP__
1081   store_category2_local(Rlocal_index, R3_tmp);
1082 #else
1083   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1084 #endif // __SOFTFP__
1085 }
1086 
1087 
1088 void TemplateTable::astore() {
1089   transition(vtos, vtos);
1090   const Register Rlocal_index = R1_tmp;
1091 
1092   __ pop_ptr(R0_tos);
1093   locals_index(Rlocal_index);
1094   Address local = load_aaddress(Rlocal_index, Rtemp);
1095   __ str(R0_tos, local);
1096 }
1097 
1098 
1099 void TemplateTable::wide_istore() {
1100   transition(vtos, vtos);
1101   const Register Rlocal_index = R2_tmp;
1102 
1103   __ pop_i(R0_tos);
1104   locals_index_wide(Rlocal_index);
1105   Address local = load_iaddress(Rlocal_index, Rtemp);
1106   __ str_32(R0_tos, local);
1107 }
1108 
1109 
1110 void TemplateTable::wide_lstore() {
1111   transition(vtos, vtos);
1112   const Register Rlocal_index = R2_tmp;
1113   const Register Rlocal_base = R3_tmp;
1114 
1115 #ifdef AARCH64
1116   __ pop_l(R0_tos);
1117 #else
1118   __ pop_l(R0_tos_lo, R1_tos_hi);
1119 #endif // AARCH64
1120 
1121   locals_index_wide(Rlocal_index);
1122   store_category2_local(Rlocal_index, R3_tmp);
1123 }
1124 
1125 
1126 void TemplateTable::wide_fstore() {
1127   wide_istore();
1128 }
1129 
1130 
1131 void TemplateTable::wide_dstore() {
1132   wide_lstore();
1133 }
1134 
1135 
1136 void TemplateTable::wide_astore() {
1137   transition(vtos, vtos);
1138   const Register Rlocal_index = R2_tmp;
1139 
1140   __ pop_ptr(R0_tos);
1141   locals_index_wide(Rlocal_index);
1142   Address local = load_aaddress(Rlocal_index, Rtemp);
1143   __ str(R0_tos, local);
1144 }
1145 
1146 
1147 void TemplateTable::iastore() {
1148   transition(itos, vtos);
1149   const Register Rindex = R4_tmp; // index_check prefers index in R4
1150   const Register Rarray = R3_tmp;
1151   // R0_tos: value
1152 
1153   __ pop_i(Rindex);
1154   index_check(Rarray, Rindex);
1155   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1156 }
1157 
1158 
1159 void TemplateTable::lastore() {
1160   transition(ltos, vtos);
1161   const Register Rindex = R4_tmp; // index_check prefers index in R4
1162   const Register Rarray = R3_tmp;
1163   // R0_tos_lo:R1_tos_hi: value
1164 
1165   __ pop_i(Rindex);
1166   index_check(Rarray, Rindex);
1167 
1168 #ifdef AARCH64
1169   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1170 #else
1171   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1172   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1173   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1174 #endif // AARCH64
1175 }
1176 
1177 
1178 void TemplateTable::fastore() {
1179   transition(ftos, vtos);
1180   const Register Rindex = R4_tmp; // index_check prefers index in R4
1181   const Register Rarray = R3_tmp;
1182   // S0_tos/R0_tos: value
1183 
1184   __ pop_i(Rindex);
1185   index_check(Rarray, Rindex);
1186   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1187 
1188 #ifdef __SOFTFP__
1189   __ str(R0_tos, addr);
1190 #else
1191   __ str_float(S0_tos, addr);
1192 #endif // __SOFTFP__
1193 }
1194 
1195 
1196 void TemplateTable::dastore() {
1197   transition(dtos, vtos);
1198   const Register Rindex = R4_tmp; // index_check prefers index in R4
1199   const Register Rarray = R3_tmp;
1200   // D0_tos / R0_tos_lo:R1_tos_hi: value
1201 
1202   __ pop_i(Rindex);
1203   index_check(Rarray, Rindex);
1204 
1205 #ifdef __SOFTFP__
1206   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1207   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1208   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1209 #else
1210   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1211 #endif // __SOFTFP__
1212 }
1213 
1214 
1215 void TemplateTable::aastore() {
1216   transition(vtos, vtos);
1217   Label is_null, throw_array_store, done;
1218 
1219   const Register Raddr_1   = R1_tmp;
1220   const Register Rvalue_2  = R2_tmp;
1221   const Register Rarray_3  = R3_tmp;
1222   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1223   const Register Rsub_5    = R5_tmp;
1224   const Register Rsuper_LR = LR_tmp;
1225 
1226   // stack: ..., array, index, value
1227   __ ldr(Rvalue_2, at_tos());     // Value
1228   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1229   __ ldr(Rarray_3, at_tos_p2());  // Array
1230 
1231   index_check_without_pop(Rarray_3, Rindex_4);
1232 
1233   // Compute the array base
1234   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1235 
1236   // do array store check - check for NULL value first
1237   __ cbz(Rvalue_2, is_null);
1238 
1239   // Load subklass
1240   __ load_klass(Rsub_5, Rvalue_2);
1241   // Load superklass
1242   __ load_klass(Rtemp, Rarray_3);
1243   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1244 
1245   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1246   // Come here on success
1247 
1248   // Store value
1249   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1250 
1251   // Now store using the appropriate barrier
1252   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false);
1253   __ b(done);
1254 
1255   __ bind(throw_array_store);
1256 
1257   // Come here on failure of subtype check
1258   __ profile_typecheck_failed(R0_tmp);
1259 
1260   // object is at TOS
1261   __ b(Interpreter::_throw_ArrayStoreException_entry);
1262 
1263   // Have a NULL in Rvalue_2, store NULL at array[index].
1264   __ bind(is_null);
1265   __ profile_null_seen(R0_tmp);
1266 
1267   // Store a NULL
1268   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true);
1269 
1270   // Pop stack arguments
1271   __ bind(done);
1272   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1273 }
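     // aastore in summary: non-null values are first checked against the array's
     // element klass via gen_subtype_check and then stored with the full write
     // barrier; a NULL value skips the type check (after profiling the null) and
     // is stored directly. The three operands stay on the expression stack until
     // the very end, which the ArrayStoreException path relies on (the object is
     // still at TOS when the exception entry is reached).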
1274 
1275 
1276 void TemplateTable::bastore() {
1277   transition(itos, vtos);
1278   const Register Rindex = R4_tmp; // index_check prefers index in R4
1279   const Register Rarray = R3_tmp;
1280   // R0_tos: value
1281 
1282   __ pop_i(Rindex);
1283   index_check(Rarray, Rindex);
1284 
1285   // Need to check whether array is boolean or byte
1286   // since both types share the bastore bytecode.
1287   __ load_klass(Rtemp, Rarray);
1288   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1289   Label L_skip;
1290   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1291   __ b(L_skip, eq);
1292   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1293   __ bind(L_skip);
1294   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1295 }
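     // bastore serves both byte[] and boolean[] arrays. The layout-helper
     // "boolean diffbit" tested above tells the two apart; when it indicates a
     // T_BOOLEAN array the value is masked with 1 so that only 0 or 1 is ever
     // stored, as required for boolean array components.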
1296 
1297 
1298 void TemplateTable::castore() {
1299   transition(itos, vtos);
1300   const Register Rindex = R4_tmp; // index_check prefers index in R4
1301   const Register Rarray = R3_tmp;
1302   // R0_tos: value
1303 
1304   __ pop_i(Rindex);
1305   index_check(Rarray, Rindex);
1306 
1307   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1308 }
1309 
1310 
1311 void TemplateTable::sastore() {
1312   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1313            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1314          "base offsets for char and short should be equal");
1315   castore();
1316 }
1317 
1318 
1319 void TemplateTable::istore(int n) {
1320   transition(itos, vtos);
1321   __ str_32(R0_tos, iaddress(n));
1322 }
1323 
1324 
1325 void TemplateTable::lstore(int n) {
1326   transition(ltos, vtos);
1327 #ifdef AARCH64
1328   __ str(R0_tos, laddress(n));
1329 #else
1330   __ str(R0_tos_lo, laddress(n));
1331   __ str(R1_tos_hi, haddress(n));
1332 #endif // AARCH64
1333 }
1334 
1335 
1336 void TemplateTable::fstore(int n) {
1337   transition(ftos, vtos);
1338 #ifdef __SOFTFP__
1339   __ str(R0_tos, faddress(n));
1340 #else
1341   __ str_float(S0_tos, faddress(n));
1342 #endif // __SOFTFP__
1343 }
1344 
1345 
1346 void TemplateTable::dstore(int n) {
1347   transition(dtos, vtos);
1348 #ifdef __SOFTFP__
1349   __ str(R0_tos_lo, laddress(n));
1350   __ str(R1_tos_hi, haddress(n));
1351 #else
1352   __ str_double(D0_tos, daddress(n));
1353 #endif // __SOFTFP__
1354 }
1355 
1356 
1357 void TemplateTable::astore(int n) {
1358   transition(vtos, vtos);
1359   __ pop_ptr(R0_tos);
1360   __ str(R0_tos, aaddress(n));
1361 }
1362 
1363 
1364 void TemplateTable::pop() {
1365   transition(vtos, vtos);
1366   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1367 }
1368 
1369 
1370 void TemplateTable::pop2() {
1371   transition(vtos, vtos);
1372   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1373 }
1374 
1375 
1376 void TemplateTable::dup() {
1377   transition(vtos, vtos);
1378   // stack: ..., a
1379   __ load_ptr(0, R0_tmp);
1380   __ push_ptr(R0_tmp);
1381   // stack: ..., a, a
1382 }
1383 
1384 
1385 void TemplateTable::dup_x1() {
1386   transition(vtos, vtos);
1387   // stack: ..., a, b
1388   __ load_ptr(0, R0_tmp);  // load b
1389   __ load_ptr(1, R2_tmp);  // load a
1390   __ store_ptr(1, R0_tmp); // store b
1391   __ store_ptr(0, R2_tmp); // store a
1392   __ push_ptr(R0_tmp);     // push b
1393   // stack: ..., b, a, b
1394 }
1395 
1396 
1397 void TemplateTable::dup_x2() {
1398   transition(vtos, vtos);
1399   // stack: ..., a, b, c
1400   __ load_ptr(0, R0_tmp);   // load c
1401   __ load_ptr(1, R2_tmp);   // load b
1402   __ load_ptr(2, R4_tmp);   // load a
1403 
1404   __ push_ptr(R0_tmp);      // push c
1405 
1406   // stack: ..., a, b, c, c
1407   __ store_ptr(1, R2_tmp);  // store b
1408   __ store_ptr(2, R4_tmp);  // store a
1409   __ store_ptr(3, R0_tmp);  // store c
1410   // stack: ..., c, a, b, c
1411 }
1412 
1413 
1414 void TemplateTable::dup2() {
1415   transition(vtos, vtos);
1416   // stack: ..., a, b
1417   __ load_ptr(1, R0_tmp);  // load a
1418   __ push_ptr(R0_tmp);     // push a
1419   __ load_ptr(1, R0_tmp);  // load b
1420   __ push_ptr(R0_tmp);     // push b
1421   // stack: ..., a, b, a, b
1422 }
1423 
1424 
1425 void TemplateTable::dup2_x1() {
1426   transition(vtos, vtos);
1427 
1428   // stack: ..., a, b, c
1429   __ load_ptr(0, R4_tmp);  // load c
1430   __ load_ptr(1, R2_tmp);  // load b
1431   __ load_ptr(2, R0_tmp);  // load a
1432 
1433   __ push_ptr(R2_tmp);     // push b
1434   __ push_ptr(R4_tmp);     // push c
1435 
1436   // stack: ..., a, b, c, b, c
1437 
1438   __ store_ptr(2, R0_tmp);  // store a
1439   __ store_ptr(3, R4_tmp);  // store c
1440   __ store_ptr(4, R2_tmp);  // store b
1441 
1442   // stack: ..., b, c, a, b, c
1443 }
1444 
1445 
1446 void TemplateTable::dup2_x2() {
1447   transition(vtos, vtos);
1448   // stack: ..., a, b, c, d
1449   __ load_ptr(0, R0_tmp);  // load d
1450   __ load_ptr(1, R2_tmp);  // load c
1451   __ push_ptr(R2_tmp);     // push c
1452   __ push_ptr(R0_tmp);     // push d
1453   // stack: ..., a, b, c, d, c, d
1454   __ load_ptr(4, R4_tmp);  // load b
1455   __ store_ptr(4, R0_tmp); // store d in b
1456   __ store_ptr(2, R4_tmp); // store b in d
1457   // stack: ..., a, d, c, b, c, d
1458   __ load_ptr(5, R4_tmp);  // load a
1459   __ store_ptr(5, R2_tmp); // store c in a
1460   __ store_ptr(3, R4_tmp); // store a in c
1461   // stack: ..., c, d, a, b, c, d
1462 }
1463 
1464 
1465 void TemplateTable::swap() {
1466   transition(vtos, vtos);
1467   // stack: ..., a, b
1468   __ load_ptr(1, R0_tmp);  // load a
1469   __ load_ptr(0, R2_tmp);  // load b
1470   __ store_ptr(0, R0_tmp); // store a in b
1471   __ store_ptr(1, R2_tmp); // store b in a
1472   // stack: ..., b, a
1473 }
1474 
1475 
1476 void TemplateTable::iop2(Operation op) {
1477   transition(itos, itos);
1478   const Register arg1 = R1_tmp;
1479   const Register arg2 = R0_tos;
1480 
1481   __ pop_i(arg1);
1482   switch (op) {
1483     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1484     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1485     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1486     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1487     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1488     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1489 #ifdef AARCH64
1490     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1491     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1492     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1493 #else
1494     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1495     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1496     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1497 #endif // AARCH64
1498     default   : ShouldNotReachHere();
1499   }
1500 }
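     // The JVM specification defines int shifts in terms of the low 5 bits of
     // the shift count only, hence the explicit "& 0x1f" in the 32-bit ARM
     // cases; the AArch64 lslv_w/asrv_w/lsrv_w forms already reduce the count
     // modulo 32, so no masking is needed there.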
1501 
1502 
1503 void TemplateTable::lop2(Operation op) {
1504   transition(ltos, ltos);
1505 #ifdef AARCH64
1506   const Register arg1 = R1_tmp;
1507   const Register arg2 = R0_tos;
1508 
1509   __ pop_l(arg1);
1510   switch (op) {
1511     case add  : __ add (R0_tos, arg1, arg2); break;
1512     case sub  : __ sub (R0_tos, arg1, arg2); break;
1513     case _and : __ andr(R0_tos, arg1, arg2); break;
1514     case _or  : __ orr (R0_tos, arg1, arg2); break;
1515     case _xor : __ eor (R0_tos, arg1, arg2); break;
1516     default   : ShouldNotReachHere();
1517   }
1518 #else
1519   const Register arg1_lo = R2_tmp;
1520   const Register arg1_hi = R3_tmp;
1521   const Register arg2_lo = R0_tos_lo;
1522   const Register arg2_hi = R1_tos_hi;
1523 
1524   __ pop_l(arg1_lo, arg1_hi);
1525   switch (op) {
1526     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1527     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1528     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1529     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1530     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1531     default : ShouldNotReachHere();
1532   }
1533 #endif // AARCH64
1534 }
1535 
1536 
1537 void TemplateTable::idiv() {
1538   transition(itos, itos);
1539 #ifdef AARCH64
1540   const Register divisor = R0_tos;
1541   const Register dividend = R1_tmp;
1542 
1543   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1544   __ pop_i(dividend);
1545   __ sdiv_w(R0_tos, dividend, divisor);
1546 #else
1547   __ mov(R2, R0_tos);
1548   __ pop_i(R0);
1549   // R0 - dividend
1550   // R2 - divisor
1551   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1552   // R1 - result
1553   __ mov(R0_tos, R1);
1554 #endif // AARCH64
1555 }
1556 
1557 
1558 void TemplateTable::irem() {
1559   transition(itos, itos);
1560 #ifdef AARCH64
1561   const Register divisor = R0_tos;
1562   const Register dividend = R1_tmp;
1563   const Register quotient = R2_tmp;
1564 
1565   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1566   __ pop_i(dividend);
1567   __ sdiv_w(quotient, dividend, divisor);
1568   __ msub_w(R0_tos, divisor, quotient, dividend);
1569 #else
1570   __ mov(R2, R0_tos);
1571   __ pop_i(R0);
1572   // R0 - dividend
1573   // R2 - divisor
1574   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1575   // R0 - remainder
1576 #endif // AARCH64
1577 }
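     // Register convention for StubRoutines::Arm::idiv_irem_entry(), as used by
     // idiv and irem above: dividend in R0, divisor in R2; the stub returns the
     // quotient in R1 and the remainder in R0. The 32-bit path has no explicit
     // zero-divisor check here, so that case is presumably handled by the stub,
     // whereas the AArch64 path branches to the ArithmeticException entry with
     // cbz_w before dividing.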
1578 
1579 
1580 void TemplateTable::lmul() {
1581   transition(ltos, ltos);
1582 #ifdef AARCH64
1583   const Register arg1 = R0_tos;
1584   const Register arg2 = R1_tmp;
1585 
1586   __ pop_l(arg2);
1587   __ mul(R0_tos, arg1, arg2);
1588 #else
1589   const Register arg1_lo = R0_tos_lo;
1590   const Register arg1_hi = R1_tos_hi;
1591   const Register arg2_lo = R2_tmp;
1592   const Register arg2_hi = R3_tmp;
1593 
1594   __ pop_l(arg2_lo, arg2_hi);
1595 
1596   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1597 #endif // AARCH64
1598 }
1599 
1600 
1601 void TemplateTable::ldiv() {
1602   transition(ltos, ltos);
1603 #ifdef AARCH64
1604   const Register divisor = R0_tos;
1605   const Register dividend = R1_tmp;
1606 
1607   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1608   __ pop_l(dividend);
1609   __ sdiv(R0_tos, dividend, divisor);
1610 #else
1611   const Register x_lo = R2_tmp;
1612   const Register x_hi = R3_tmp;
1613   const Register y_lo = R0_tos_lo;
1614   const Register y_hi = R1_tos_hi;
1615 
1616   __ pop_l(x_lo, x_hi);
1617 
1618   // check if y = 0
1619   __ orrs(Rtemp, y_lo, y_hi);
1620   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1621   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1622 #endif // AARCH64
1623 }
1624 
1625 
1626 void TemplateTable::lrem() {
1627   transition(ltos, ltos);
1628 #ifdef AARCH64
1629   const Register divisor = R0_tos;
1630   const Register dividend = R1_tmp;
1631   const Register quotient = R2_tmp;
1632 
1633   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1634   __ pop_l(dividend);
1635   __ sdiv(quotient, dividend, divisor);
1636   __ msub(R0_tos, divisor, quotient, dividend);
1637 #else
1638   const Register x_lo = R2_tmp;
1639   const Register x_hi = R3_tmp;
1640   const Register y_lo = R0_tos_lo;
1641   const Register y_hi = R1_tos_hi;
1642 
1643   __ pop_l(x_lo, x_hi);
1644 
1645   // check if y = 0
1646   __ orrs(Rtemp, y_lo, y_hi);
1647   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1648   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1649 #endif // AARCH64
1650 }
1651 
1652 
1653 void TemplateTable::lshl() {
1654   transition(itos, ltos);
1655 #ifdef AARCH64
1656   const Register val = R1_tmp;
1657   const Register shift_cnt = R0_tos;
1658   __ pop_l(val);
1659   __ lslv(R0_tos, val, shift_cnt);
1660 #else
1661   const Register shift_cnt = R4_tmp;
1662   const Register val_lo = R2_tmp;
1663   const Register val_hi = R3_tmp;
1664 
1665   __ pop_l(val_lo, val_hi);
1666   __ andr(shift_cnt, R0_tos, 63);
1667   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1668 #endif // AARCH64
1669 }
1670 
1671 
1672 void TemplateTable::lshr() {
1673   transition(itos, ltos);
1674 #ifdef AARCH64
1675   const Register val = R1_tmp;
1676   const Register shift_cnt = R0_tos;
1677   __ pop_l(val);
1678   __ asrv(R0_tos, val, shift_cnt);
1679 #else
1680   const Register shift_cnt = R4_tmp;
1681   const Register val_lo = R2_tmp;
1682   const Register val_hi = R3_tmp;
1683 
1684   __ pop_l(val_lo, val_hi);
1685   __ andr(shift_cnt, R0_tos, 63);
1686   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1687 #endif // AARCH64
1688 }
1689 
1690 
1691 void TemplateTable::lushr() {
1692   transition(itos, ltos);
1693 #ifdef AARCH64
1694   const Register val = R1_tmp;
1695   const Register shift_cnt = R0_tos;
1696   __ pop_l(val);
1697   __ lsrv(R0_tos, val, shift_cnt);
1698 #else
1699   const Register shift_cnt = R4_tmp;
1700   const Register val_lo = R2_tmp;
1701   const Register val_hi = R3_tmp;
1702 
1703   __ pop_l(val_lo, val_hi);
1704   __ andr(shift_cnt, R0_tos, 63);
1705   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1706 #endif // AARCH64
1707 }
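     // The three long-shift templates above mask the shift count to its low
     // 6 bits, as the JVM specification requires for long shifts; on AArch64
     // the lslv/asrv/lsrv instructions reduce the count modulo 64 themselves,
     // while on 32-bit ARM long_shift() combines the two 32-bit halves.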
1708 
1709 
1710 void TemplateTable::fop2(Operation op) {
1711   transition(ftos, ftos);
1712 #ifdef __SOFTFP__
1713   __ mov(R1, R0_tos);
1714   __ pop_i(R0);
1715   switch (op) {
1716     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1717     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1718     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1719     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1720     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1721     default : ShouldNotReachHere();
1722   }
1723 #else
1724   const FloatRegister arg1 = S1_tmp;
1725   const FloatRegister arg2 = S0_tos;
1726 
1727   switch (op) {
1728     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1729     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1730     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1731     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1732     case rem:
1733 #ifndef __ABI_HARD__
1734       __ pop_f(arg1);
1735       __ fmrs(R0, arg1);
1736       __ fmrs(R1, arg2);
1737       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1738       __ fmsr(S0_tos, R0);
1739 #else
1740       __ mov_float(S1_reg, arg2);
1741       __ pop_f(S0);
1742       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1743 #endif // !__ABI_HARD__
1744       break;
1745     default : ShouldNotReachHere();
1746   }
1747 #endif // __SOFTFP__
1748 }
1749 
1750 
1751 void TemplateTable::dop2(Operation op) {
1752   transition(dtos, dtos);
1753 #ifdef __SOFTFP__
1754   __ mov(R2, R0_tos_lo);
1755   __ mov(R3, R1_tos_hi);
1756   __ pop_l(R0, R1);
1757   switch (op) {
1758     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1759     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1760     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1761     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1762     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1763     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1764     default : ShouldNotReachHere();
1765   }
1766 #else
1767   const FloatRegister arg1 = D1_tmp;
1768   const FloatRegister arg2 = D0_tos;
1769 
1770   switch (op) {
1771     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1772     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1773     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1774     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1775     case rem:
1776 #ifndef __ABI_HARD__
1777       __ pop_d(arg1);
1778       __ fmrrd(R0, R1, arg1);
1779       __ fmrrd(R2, R3, arg2);
1780       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1781       __ fmdrr(D0_tos, R0, R1);
1782 #else
1783       __ mov_double(D1, arg2);
1784       __ pop_d(D0);
1785       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1786 #endif // !__ABI_HARD__
1787       break;
1788     default : ShouldNotReachHere();
1789   }
1790 #endif // __SOFTFP__
1791 }
1792 
1793 
1794 void TemplateTable::ineg() {
1795   transition(itos, itos);
1796   __ neg_32(R0_tos, R0_tos);
1797 }
1798 
1799 
1800 void TemplateTable::lneg() {
1801   transition(ltos, ltos);
1802 #ifdef AARCH64
1803   __ neg(R0_tos, R0_tos);
1804 #else
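       // negate the 64-bit value: rsbs computes (0 - lo) and sets the carry/borrow,
       // rsc then computes (0 - hi - borrow) for the high word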
1805   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1806   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1807 #endif // AARCH64
1808 }
1809 
1810 
1811 void TemplateTable::fneg() {
1812   transition(ftos, ftos);
1813 #ifdef __SOFTFP__
1814   // Invert sign bit
1815   const int sign_mask = 0x80000000;
1816   __ eor(R0_tos, R0_tos, sign_mask);
1817 #else
1818   __ neg_float(S0_tos, S0_tos);
1819 #endif // __SOFTFP__
1820 }
1821 
1822 
1823 void TemplateTable::dneg() {
1824   transition(dtos, dtos);
1825 #ifdef __SOFTFP__
1826   // Invert sign bit in the high part of the double
1827   const int sign_mask_hi = 0x80000000;
1828   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1829 #else
1830   __ neg_double(D0_tos, D0_tos);
1831 #endif // __SOFTFP__
1832 }
1833 
1834 
1835 void TemplateTable::iinc() {
1836   transition(vtos, vtos);
1837   const Register Rconst = R2_tmp;
1838   const Register Rlocal_index = R1_tmp;
1839   const Register Rval = R0_tmp;
1840 
1841   __ ldrsb(Rconst, at_bcp(2));
1842   locals_index(Rlocal_index);
1843   Address local = load_iaddress(Rlocal_index, Rtemp);
1844   __ ldr_s32(Rval, local);
1845   __ add(Rval, Rval, Rconst);
1846   __ str_32(Rval, local);
1847 }
1848 
1849 
1850 void TemplateTable::wide_iinc() {
1851   transition(vtos, vtos);
1852   const Register Rconst = R2_tmp;
1853   const Register Rlocal_index = R1_tmp;
1854   const Register Rval = R0_tmp;
1855 
1856   // get constant in Rconst
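       // (wide iinc stores the 16-bit signed increment big-endian at bcp+4..bcp+5)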
1857   __ ldrsb(R2_tmp, at_bcp(4));
1858   __ ldrb(R3_tmp, at_bcp(5));
1859   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1860 
1861   locals_index_wide(Rlocal_index);
1862   Address local = load_iaddress(Rlocal_index, Rtemp);
1863   __ ldr_s32(Rval, local);
1864   __ add(Rval, Rval, Rconst);
1865   __ str_32(Rval, local);
1866 }
1867 
1868 
1869 void TemplateTable::convert() {
1870   // Checking
1871 #ifdef ASSERT
1872   { TosState tos_in  = ilgl;
1873     TosState tos_out = ilgl;
1874     switch (bytecode()) {
1875       case Bytecodes::_i2l: // fall through
1876       case Bytecodes::_i2f: // fall through
1877       case Bytecodes::_i2d: // fall through
1878       case Bytecodes::_i2b: // fall through
1879       case Bytecodes::_i2c: // fall through
1880       case Bytecodes::_i2s: tos_in = itos; break;
1881       case Bytecodes::_l2i: // fall through
1882       case Bytecodes::_l2f: // fall through
1883       case Bytecodes::_l2d: tos_in = ltos; break;
1884       case Bytecodes::_f2i: // fall through
1885       case Bytecodes::_f2l: // fall through
1886       case Bytecodes::_f2d: tos_in = ftos; break;
1887       case Bytecodes::_d2i: // fall through
1888       case Bytecodes::_d2l: // fall through
1889       case Bytecodes::_d2f: tos_in = dtos; break;
1890       default             : ShouldNotReachHere();
1891     }
1892     switch (bytecode()) {
1893       case Bytecodes::_l2i: // fall through
1894       case Bytecodes::_f2i: // fall through
1895       case Bytecodes::_d2i: // fall through
1896       case Bytecodes::_i2b: // fall through
1897       case Bytecodes::_i2c: // fall through
1898       case Bytecodes::_i2s: tos_out = itos; break;
1899       case Bytecodes::_i2l: // fall through
1900       case Bytecodes::_f2l: // fall through
1901       case Bytecodes::_d2l: tos_out = ltos; break;
1902       case Bytecodes::_i2f: // fall through
1903       case Bytecodes::_l2f: // fall through
1904       case Bytecodes::_d2f: tos_out = ftos; break;
1905       case Bytecodes::_i2d: // fall through
1906       case Bytecodes::_l2d: // fall through
1907       case Bytecodes::_f2d: tos_out = dtos; break;
1908       default             : ShouldNotReachHere();
1909     }
1910     transition(tos_in, tos_out);
1911   }
1912 #endif // ASSERT
1913 
1914   // Conversion
1915   switch (bytecode()) {
1916     case Bytecodes::_i2l:
1917 #ifdef AARCH64
1918       __ sign_extend(R0_tos, R0_tos, 32);
1919 #else
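           // the high word is the sign extension of the low word (arithmetic shift by 31)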
1920       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1921 #endif // AARCH64
1922       break;
1923 
1924     case Bytecodes::_i2f:
1925 #ifdef AARCH64
1926       __ scvtf_sw(S0_tos, R0_tos);
1927 #else
1928 #ifdef __SOFTFP__
1929       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1930 #else
1931       __ fmsr(S0_tmp, R0_tos);
1932       __ fsitos(S0_tos, S0_tmp);
1933 #endif // __SOFTFP__
1934 #endif // AARCH64
1935       break;
1936 
1937     case Bytecodes::_i2d:
1938 #ifdef AARCH64
1939       __ scvtf_dw(D0_tos, R0_tos);
1940 #else
1941 #ifdef __SOFTFP__
1942       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1943 #else
1944       __ fmsr(S0_tmp, R0_tos);
1945       __ fsitod(D0_tos, S0_tmp);
1946 #endif // __SOFTFP__
1947 #endif // AARCH64
1948       break;
1949 
1950     case Bytecodes::_i2b:
1951       __ sign_extend(R0_tos, R0_tos, 8);
1952       break;
1953 
1954     case Bytecodes::_i2c:
1955       __ zero_extend(R0_tos, R0_tos, 16);
1956       break;
1957 
1958     case Bytecodes::_i2s:
1959       __ sign_extend(R0_tos, R0_tos, 16);
1960       break;
1961 
1962     case Bytecodes::_l2i:
1963       /* nothing to do */
1964       break;
1965 
1966     case Bytecodes::_l2f:
1967 #ifdef AARCH64
1968       __ scvtf_sx(S0_tos, R0_tos);
1969 #else
1970       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1971 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1972       __ fmsr(S0_tos, R0);
1973 #endif // !__SOFTFP__ && !__ABI_HARD__
1974 #endif // AARCH64
1975       break;
1976 
1977     case Bytecodes::_l2d:
1978 #ifdef AARCH64
1979       __ scvtf_dx(D0_tos, R0_tos);
1980 #else
1981       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1982 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1983       __ fmdrr(D0_tos, R0, R1);
1984 #endif // !__SOFTFP__ && !__ABI_HARD__
1985 #endif // AARCH64
1986       break;
1987 
1988     case Bytecodes::_f2i:
1989 #ifdef AARCH64
1990       __ fcvtzs_ws(R0_tos, S0_tos);
1991 #else
1992 #ifndef __SOFTFP__
1993       __ ftosizs(S0_tos, S0_tos);
1994       __ fmrs(R0_tos, S0_tos);
1995 #else
1996       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1997 #endif // !__SOFTFP__
1998 #endif // AARCH64
1999       break;
2000 
2001     case Bytecodes::_f2l:
2002 #ifdef AARCH64
2003       __ fcvtzs_xs(R0_tos, S0_tos);
2004 #else
2005 #ifndef __SOFTFP__
2006       __ fmrs(R0_tos, S0_tos);
2007 #endif // !__SOFTFP__
2008       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2009 #endif // AARCH64
2010       break;
2011 
2012     case Bytecodes::_f2d:
2013 #ifdef __SOFTFP__
2014       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2015 #else
2016       __ convert_f2d(D0_tos, S0_tos);
2017 #endif // __SOFTFP__
2018       break;
2019 
2020     case Bytecodes::_d2i:
2021 #ifdef AARCH64
2022       __ fcvtzs_wd(R0_tos, D0_tos);
2023 #else
2024 #ifndef __SOFTFP__
2025       __ ftosizd(Stemp, D0);
2026       __ fmrs(R0, Stemp);
2027 #else
2028       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2029 #endif // !__SOFTFP__
2030 #endif // AARCH64
2031       break;
2032 
2033     case Bytecodes::_d2l:
2034 #ifdef AARCH64
2035       __ fcvtzs_xd(R0_tos, D0_tos);
2036 #else
2037 #ifndef __SOFTFP__
2038       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2039 #endif // !__SOFTFP__
2040       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2041 #endif // AARCH64
2042       break;
2043 
2044     case Bytecodes::_d2f:
2045 #ifdef __SOFTFP__
2046       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2047 #else
2048       __ convert_d2f(S0_tos, D0_tos);
2049 #endif // __SOFTFP__
2050       break;
2051 
2052     default:
2053       ShouldNotReachHere();
2054   }
2055 }
2056 
2057 
2058 void TemplateTable::lcmp() {
2059   transition(ltos, itos);
2060 #ifdef AARCH64
2061   const Register arg1 = R1_tmp;
2062   const Register arg2 = R0_tos;
2063 
2064   __ pop_l(arg1);
2065 
2066   __ cmp(arg1, arg2);
2067   __ cset(R0_tos, gt);               // 1 if '>', else 0
2068   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2069 #else
2070   const Register arg1_lo = R2_tmp;
2071   const Register arg1_hi = R3_tmp;
2072   const Register arg2_lo = R0_tos_lo;
2073   const Register arg2_hi = R1_tos_hi;
2074   const Register res = R4_tmp;
2075 
2076   __ pop_l(arg1_lo, arg1_hi);
2077 
2078   // long compare arg1 with arg2
2079   // result is -1/0/+1 if '<'/'='/'>'
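       // (high halves are compared as signed values, low halves as unsigned)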
2080   Label done;
2081 
2082   __ mov (res, 0);
2083   __ cmp (arg1_hi, arg2_hi);
2084   __ mvn (res, 0, lt);
2085   __ mov (res, 1, gt);
2086   __ b(done, ne);
2087   __ cmp (arg1_lo, arg2_lo);
2088   __ mvn (res, 0, lo);
2089   __ mov (res, 1, hi);
2090   __ bind(done);
2091   __ mov (R0_tos, res);
2092 #endif // AARCH64
2093 }
2094 
2095 
2096 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2097   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2098 
2099 #ifdef AARCH64
2100   if (is_float) {
2101     transition(ftos, itos);
2102     __ pop_f(S1_tmp);
2103     __ fcmp_s(S1_tmp, S0_tos);
2104   } else {
2105     transition(dtos, itos);
2106     __ pop_d(D1_tmp);
2107     __ fcmp_d(D1_tmp, D0_tos);
2108   }
2109 
2110   if (unordered_result < 0) {
2111     __ cset(R0_tos, gt);               // 1 if '>', else 0
2112     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2113   } else {
2114     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2115     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2116   }
2117 
2118 #else
2119 
2120 #ifdef __SOFTFP__
2121 
2122   if (is_float) {
2123     transition(ftos, itos);
2124     const Register Rx = R0;
2125     const Register Ry = R1;
2126 
2127     __ mov(Ry, R0_tos);
2128     __ pop_i(Rx);
2129 
2130     if (unordered_result == 1) {
2131       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2132     } else {
2133       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2134     }
2135 
2136   } else {
2137 
2138     transition(dtos, itos);
2139     const Register Rx_lo = R0;
2140     const Register Rx_hi = R1;
2141     const Register Ry_lo = R2;
2142     const Register Ry_hi = R3;
2143 
2144     __ mov(Ry_lo, R0_tos_lo);
2145     __ mov(Ry_hi, R1_tos_hi);
2146     __ pop_l(Rx_lo, Rx_hi);
2147 
2148     if (unordered_result == 1) {
2149       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2150     } else {
2151       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2152     }
2153   }
2154 
2155 #else
2156 
2157   if (is_float) {
2158     transition(ftos, itos);
2159     __ pop_f(S1_tmp);
2160     __ fcmps(S1_tmp, S0_tos);
2161   } else {
2162     transition(dtos, itos);
2163     __ pop_d(D1_tmp);
2164     __ fcmpd(D1_tmp, D0_tos);
2165   }
2166 
2167   __ fmstat();
2168 
2169   // comparison result | flag N | flag Z | flag C | flag V
2170   // "<"               |   1    |   0    |   0    |   0
2171   // "=="              |   0    |   1    |   1    |   0
2172   // ">"               |   0    |   0    |   1    |   0
2173   // unordered         |   0    |   0    |   1    |   1
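       // 'lt' (N != V) is true for "<" and for unordered, while 'mi' (N set) is true
       // only for "<"; this is how the unordered case folds into -1 or +1 below.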
2174 
2175   if (unordered_result < 0) {
2176     __ mov(R0_tos, 1);           // result ==  1 if greater
2177     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2178   } else {
2179     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2180     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2181   }
2182   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2183 #endif // __SOFTFP__
2184 #endif // AARCH64
2185 }
2186 
2187 
2188 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2189 
2190   const Register Rdisp = R0_tmp;
2191   const Register Rbumped_taken_count = R5_tmp;
2192 
2193   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2194 
2195   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2196                              InvocationCounter::counter_offset();
2197   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2198                               InvocationCounter::counter_offset();
2199   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2200 
2201   // Load up R0 with the branch displacement
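       // The displacement is a signed big-endian value: 2 bytes for the normal
       // branch bytecodes, 4 bytes for the wide ones (goto_w/jsr_w).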
2202   if (is_wide) {
2203     __ ldrsb(R0_tmp, at_bcp(1));
2204     __ ldrb(R1_tmp, at_bcp(2));
2205     __ ldrb(R2_tmp, at_bcp(3));
2206     __ ldrb(R3_tmp, at_bcp(4));
2207     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2208     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2209     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2210   } else {
2211     __ ldrsb(R0_tmp, at_bcp(1));
2212     __ ldrb(R1_tmp, at_bcp(2));
2213     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2214   }
2215 
2216   // Handle all the JSR stuff here, then exit.
2217   // It's much shorter and cleaner than intermingling with the
2218   // non-JSR normal-branch stuff occurring below.
2219   if (is_jsr) {
2220     // compute return address as bci in R1
2221     const Register Rret_addr = R1_tmp;
2222     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2223 
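         // return bci = (bcp + bytecode length) - code base; jsr is 3 bytes, jsr_w is 5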
2224     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2225     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2226     __ sub(Rret_addr, Rret_addr, Rtemp);
2227 
2228     // Load the next target bytecode into R3_bytecode and advance Rbcp
2229 #ifdef AARCH64
2230     __ add(Rbcp, Rbcp, Rdisp);
2231     __ ldrb(R3_bytecode, Address(Rbcp));
2232 #else
2233     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2234 #endif // AARCH64
2235 
2236     // Push return address
2237     __ push_i(Rret_addr);
2238     // jsr returns vtos
2239     __ dispatch_only_noverify(vtos);
2240     return;
2241   }
2242 
2243   // Normal (non-jsr) branch handling
2244 
2245   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2246 #ifdef AARCH64
2247   __ add(Rbcp, Rbcp, Rdisp);
2248   __ ldrb(R3_bytecode, Address(Rbcp));
2249 #else
2250   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2251 #endif // AARCH64
2252 
2253   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2254   Label backedge_counter_overflow;
2255   Label profile_method;
2256   Label dispatch;
2257 
2258   if (UseLoopCounter) {
2259     // increment backedge counter for backward branches
2260     // Rdisp (R0): target offset
2261 
2262     const Register Rcnt = R2_tmp;
2263     const Register Rcounters = R1_tmp;
2264 
2265     // count only if backward branch
2266 #ifdef AARCH64
2267     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2268 #else
2269     __ tst(Rdisp, Rdisp);
2270     __ b(dispatch, pl);
2271 #endif // AARCH64
2272 
2273     if (TieredCompilation) {
2274       Label no_mdo;
2275       int increment = InvocationCounter::count_increment;
2276       if (ProfileInterpreter) {
2277         // Are we profiling?
2278         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2279         __ cbz(Rtemp, no_mdo);
2280         // Increment the MDO backedge counter
2281         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2282                                                   in_bytes(InvocationCounter::counter_offset()));
2283         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2284         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2285                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2286         __ b(dispatch);
2287       }
2288       __ bind(no_mdo);
2289       // Increment backedge counter in MethodCounters*
2290       // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on AArch64
2291       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2292                              Rdisp, R3_bytecode,
2293                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2294       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2295       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2296                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2297     } else {
2298       // Increment backedge counter in MethodCounters*
2299       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2300                              Rdisp, R3_bytecode,
2301                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2302       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2303       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2304       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2305 
2306       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2307 #ifdef AARCH64
2308       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // mask out the status bits, keep only the count
2309 #else
2310       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // mask out the status bits, keep only the count
2311 #endif // AARCH64
2312       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2313 
2314       if (ProfileInterpreter) {
2315         // Test to see if we should create a method data oop
2316         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2317         __ ldr_s32(Rtemp, profile_limit);
2318         __ cmp_32(Rcnt, Rtemp);
2319         __ b(dispatch, lt);
2320 
2321         // if no method data exists, go to profile method
2322         __ test_method_data_pointer(R4_tmp, profile_method);
2323 
2324         if (UseOnStackReplacement) {
2325           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2326           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2327           __ ldr_s32(Rtemp, backward_branch_limit);
2328           __ cmp(Rbumped_taken_count, Rtemp);
2329           __ b(dispatch, lo);
2330 
2331           // When ProfileInterpreter is on, the backedge_count comes from the
2332           // MethodData*, whose value does not get reset on the call to
2333           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2334           // routine while the method is being compiled, add a second test to make
2335           // sure the overflow function is called only once every overflow_frequency.
2336           const int overflow_frequency = 1024;
2337 
2338 #ifdef AARCH64
2339           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2340 #else
2341           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2342           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
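               // shifting left by 22 moves the low 10 bits to the top of the word, so Z is
               // set iff the bumped taken count is a multiple of overflow_frequency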
2343           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2344 #endif // AARCH64
2345 
2346           __ b(backedge_counter_overflow, eq);
2347         }
2348       } else {
2349         if (UseOnStackReplacement) {
2350           // check for overflow against Rcnt, which is the sum of the counters
2351           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2352           __ ldr_s32(Rtemp, backward_branch_limit);
2353           __ cmp_32(Rcnt, Rtemp);
2354           __ b(backedge_counter_overflow, hs);
2355 
2356         }
2357       }
2358     }
2359     __ bind(dispatch);
2360   }
2361 
2362   if (!UseOnStackReplacement) {
2363     __ bind(backedge_counter_overflow);
2364   }
2365 
2366   // continue with the bytecode @ target
2367   __ dispatch_only(vtos);
2368 
2369   if (UseLoopCounter) {
2370     if (ProfileInterpreter) {
2371       // Out-of-line code to allocate method data oop.
2372       __ bind(profile_method);
2373 
2374       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2375       __ set_method_data_pointer_for_bcp();
2376       // reload next bytecode
2377       __ ldrb(R3_bytecode, Address(Rbcp));
2378       __ b(dispatch);
2379     }
2380 
2381     if (UseOnStackReplacement) {
2382       // backedge counter overflow
2383       __ bind(backedge_counter_overflow);
2384 
2385       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2386       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2387 
2388       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2389       const Register Rnmethod = R0;
2390 
2391       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2392 
2393       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2394 
2395       // nmethod may have been invalidated (VM may block upon call_VM return)
2396       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2397       __ cmp(R1_tmp, nmethod::in_use);
2398       __ b(dispatch, ne);
2399 
2400       // We have the address of an on-stack-replacement routine in Rnmethod.
2401       // We need to prepare to execute the OSR method. First we must
2402       // migrate the locals and monitors off the stack.
2403 
2404       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2405 
2406       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2407 
2408       // R0 is OSR buffer
2409 
2410       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2411       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2412 
2413 #ifdef AARCH64
2414       __ ldp(FP, LR, Address(FP));
2415       __ mov(SP, Rtemp);
2416 #else
2417       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2418       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2419 #endif // AARCH64
2420 
2421       __ jump(R1_tmp);
2422     }
2423   }
2424 }
2425 
2426 
2427 void TemplateTable::if_0cmp(Condition cc) {
2428   transition(itos, vtos);
2429   // assume branch is more often taken than not (loops use backward branches)
2430   Label not_taken;
2431 #ifdef AARCH64
2432   if (cc == equal) {
2433     __ cbnz_w(R0_tos, not_taken);
2434   } else if (cc == not_equal) {
2435     __ cbz_w(R0_tos, not_taken);
2436   } else {
2437     __ cmp_32(R0_tos, 0);
2438     __ b(not_taken, convNegCond(cc));
2439   }
2440 #else
2441   __ cmp_32(R0_tos, 0);
2442   __ b(not_taken, convNegCond(cc));
2443 #endif // AARCH64
2444   branch(false, false);
2445   __ bind(not_taken);
2446   __ profile_not_taken_branch(R0_tmp);
2447 }
2448 
2449 
2450 void TemplateTable::if_icmp(Condition cc) {
2451   transition(itos, vtos);
2452   // assume branch is more often taken than not (loops use backward branches)
2453   Label not_taken;
2454   __ pop_i(R1_tmp);
2455   __ cmp_32(R1_tmp, R0_tos);
2456   __ b(not_taken, convNegCond(cc));
2457   branch(false, false);
2458   __ bind(not_taken);
2459   __ profile_not_taken_branch(R0_tmp);
2460 }
2461 
2462 
2463 void TemplateTable::if_nullcmp(Condition cc) {
2464   transition(atos, vtos);
2465   assert(cc == equal || cc == not_equal, "invalid condition");
2466 
2467   // assume branch is more often taken than not (loops use backward branches)
2468   Label not_taken;
2469   if (cc == equal) {
2470     __ cbnz(R0_tos, not_taken);
2471   } else {
2472     __ cbz(R0_tos, not_taken);
2473   }
2474   branch(false, false);
2475   __ bind(not_taken);
2476   __ profile_not_taken_branch(R0_tmp);
2477 }
2478 
2479 
2480 void TemplateTable::if_acmp(Condition cc) {
2481   transition(atos, vtos);
2482   // assume branch is more often taken than not (loops use backward branches)
2483   Label not_taken;
2484   __ pop_ptr(R1_tmp);
2485   __ cmp(R1_tmp, R0_tos);
2486   __ b(not_taken, convNegCond(cc));
2487   branch(false, false);
2488   __ bind(not_taken);
2489   __ profile_not_taken_branch(R0_tmp);
2490 }
2491 
2492 
2493 void TemplateTable::ret() {
2494   transition(vtos, vtos);
2495   const Register Rlocal_index = R1_tmp;
2496   const Register Rret_bci = Rtmp_save0; // R4/R19
2497 
2498   locals_index(Rlocal_index);
2499   Address local = load_iaddress(Rlocal_index, Rtemp);
2500   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2501   __ profile_ret(Rtmp_save1, Rret_bci);
2502   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2503   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2504   __ add(Rbcp, Rtemp, Rret_bci);
2505   __ dispatch_next(vtos);
2506 }
2507 
2508 
2509 void TemplateTable::wide_ret() {
2510   transition(vtos, vtos);
2511   const Register Rlocal_index = R1_tmp;
2512   const Register Rret_bci = Rtmp_save0; // R4/R19
2513 
2514   locals_index_wide(Rlocal_index);
2515   Address local = load_iaddress(Rlocal_index, Rtemp);
2516   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2517   __ profile_ret(Rtmp_save1, Rret_bci);
2518   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2519   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2520   __ add(Rbcp, Rtemp, Rret_bci);
2521   __ dispatch_next(vtos);
2522 }
2523 
2524 
2525 void TemplateTable::tableswitch() {
2526   transition(itos, vtos);
2527 
2528   const Register Rindex  = R0_tos;
2530   const Register Rtemp2  = R1_tmp;  // also needed on AArch64 (byteswap_u32, profile_switch_case)
2532   const Register Rabcp   = R2_tmp;  // aligned bcp
2533   const Register Rlow    = R3_tmp;
2534   const Register Rhigh   = R4_tmp;
2535   const Register Roffset = R5_tmp;
2536 
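       // tableswitch layout (after the opcode): 0-3 bytes of padding to a 4-byte
       // boundary, then default (4 bytes), low (4 bytes), high (4 bytes) and
       // (high-low+1) jump offsets, all big-endian. After the load below Rabcp
       // points at the jump table, so the default offset is at Rabcp - 3*BytesPerInt.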
2537   // align bcp
2538   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2539   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2540 
2541   // load lo & hi
2542 #ifdef AARCH64
2543   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2544 #else
2545   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2546 #endif // AARCH64
2547   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2548   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2549 
2550   // compare index with high bound
2551   __ cmp_32(Rhigh, Rindex);
2552 
2553 #ifdef AARCH64
2554   Label default_case, do_dispatch;
2555   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2556   __ b(default_case, lt);
2557 
2558   __ sub_w(Rindex, Rindex, Rlow);
2559   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2560   if(ProfileInterpreter) {
2561     __ sxtw(Rindex, Rindex);
2562     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2563   }
2564   __ b(do_dispatch);
2565 
2566   __ bind(default_case);
2567   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2568   if(ProfileInterpreter) {
2569     __ profile_switch_default(R0_tmp);
2570   }
2571 
2572   __ bind(do_dispatch);
2573 #else
2574 
2575   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2576   __ subs(Rindex, Rindex, Rlow, ge);
2577 
2578   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2579   // ("ge" status accumulated from cmp and subs instructions) then load
2580   // offset from table, otherwise load offset for default case
2581 
2582   if(ProfileInterpreter) {
2583     Label default_case, continue_execution;
2584 
2585     __ b(default_case, lt);
2586     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2587     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2588     __ b(continue_execution);
2589 
2590     __ bind(default_case);
2591     __ profile_switch_default(R0_tmp);
2592     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2593 
2594     __ bind(continue_execution);
2595   } else {
2596     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2597     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2598   }
2599 #endif // AARCH64
2600 
2601   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2602 
2603   // load the next bytecode to R3_bytecode and advance Rbcp
2604 #ifdef AARCH64
2605   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2606   __ ldrb(R3_bytecode, Address(Rbcp));
2607 #else
2608   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2609 #endif // AARCH64
2610   __ dispatch_only(vtos);
2611 
2612 }
2613 
2614 
2615 void TemplateTable::lookupswitch() {
2616   transition(itos, itos);
2617   __ stop("lookupswitch bytecode should have been rewritten");
2618 }
2619 
2620 
2621 void TemplateTable::fast_linearswitch() {
2622   transition(itos, vtos);
2623   Label loop, found, default_case, continue_execution;
2624 
2625   const Register Rkey     = R0_tos;
2626   const Register Rabcp    = R2_tmp;  // aligned bcp
2627   const Register Rdefault = R3_tmp;
2628   const Register Rcount   = R4_tmp;
2629   const Register Roffset  = R5_tmp;
2630 
2631   // bswap Rkey, so we can avoid bswapping the table entries
2632   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2633 
2634   // align bcp
2635   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2636   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2637 
2638   // load default & counter
2639 #ifdef AARCH64
2640   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2641 #else
2642   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2643 #endif // AARCH64
2644   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2645 
2646 #ifdef AARCH64
2647   __ cbz_w(Rcount, default_case);
2648 #else
2649   __ cmp_32(Rcount, 0);
2650   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2651   __ b(default_case, eq);
2652 #endif // AARCH64
2653 
2654   // table search
2655   __ bind(loop);
2656 #ifdef AARCH64
2657   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2658 #endif // AARCH64
2659   __ cmp_32(Rtemp, Rkey);
2660   __ b(found, eq);
2661   __ subs(Rcount, Rcount, 1);
2662 #ifndef AARCH64
2663   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2664 #endif // !AARCH64
2665   __ b(loop, ne);
2666 
2667   // default case
2668   __ bind(default_case);
2669   __ profile_switch_default(R0_tmp);
2670   __ mov(Roffset, Rdefault);
2671   __ b(continue_execution);
2672 
2673   // entry found -> get offset
2674   __ bind(found);
2675   // Rabcp is already incremented and points to the next entry
2676   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2677   if (ProfileInterpreter) {
2678     // Calculate index of the selected case.
2679     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2680 
2681     // align bcp
2682     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2683     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2684 
2685     // load number of cases
2686     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2687     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2688 
2689     // Selected index = <number of cases> - <current loop count>
2690     __ sub(R1_tmp, R2_tmp, Rcount);
2691     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2692   }
2693 
2694   // continue execution
2695   __ bind(continue_execution);
2696   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2697 
2698   // load the next bytecode to R3_bytecode and advance Rbcp
2699 #ifdef AARCH64
2700   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2701   __ ldrb(R3_bytecode, Address(Rbcp));
2702 #else
2703   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2704 #endif // AARCH64
2705   __ dispatch_only(vtos);
2706 }
2707 
2708 
2709 void TemplateTable::fast_binaryswitch() {
2710   transition(itos, vtos);
2711   // Implementation using the following core algorithm:
2712   //
2713   // int binary_search(int key, LookupswitchPair* array, int n) {
2714   //   // Binary search according to "Methodik des Programmierens" by
2715   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2716   //   int i = 0;
2717   //   int j = n;
2718   //   while (i+1 < j) {
2719   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2720   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2721   //     // where a stands for the array and assuming that the (non-existing)
2722   //     // element a[n] is infinitely big.
2723   //     int h = (i + j) >> 1;
2724   //     // i < h < j
2725   //     if (key < array[h].fast_match()) {
2726   //       j = h;
2727   //     } else {
2728   //       i = h;
2729   //     }
2730   //   }
2731   //   // R: a[i] <= key < a[i+1] or Q
2732   //   // (i.e., if key is within array, i is the correct index)
2733   //   return i;
2734   // }
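       //
       // lookupswitch layout (after the opcode): 0-3 bytes of padding to a 4-byte
       // boundary, then default offset (4 bytes), npairs (4 bytes) and npairs
       // (match, offset) pairs, all big-endian. 'array' below points at the first
       // pair, so npairs is at array[-BytesPerInt] and the default offset at
       // array[-2*BytesPerInt].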
2735 
2736   // register allocation
2737   const Register key    = R0_tos;                // already set (tosca)
2738   const Register array  = R1_tmp;
2739   const Register i      = R2_tmp;
2740   const Register j      = R3_tmp;
2741   const Register h      = R4_tmp;
2742   const Register val    = R5_tmp;
2743   const Register temp1  = Rtemp;
2744   const Register temp2  = LR_tmp;
2745   const Register offset = R3_tmp;
2746 
2747   // set 'array' = aligned bcp + 2 ints
2748   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2749   __ align_reg(array, temp1, BytesPerInt);
2750 
2751   // initialize i & j
2752   __ mov(i, 0);                                  // i = 0;
2753   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2754   // Convert j into native byteordering
2755   __ byteswap_u32(j, temp1, temp2);
2756 
2757   // and start
2758   Label entry;
2759   __ b(entry);
2760 
2761   // binary search loop
2762   { Label loop;
2763     __ bind(loop);
2764     // int h = (i + j) >> 1;
2765     __ add(h, i, j);                             // h = i + j;
2766     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2767     // if (key < array[h].fast_match()) {
2768     //   j = h;
2769     // } else {
2770     //   i = h;
2771     // }
2772 #ifdef AARCH64
2773     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2774     __ ldr_s32(val, Address(temp1));
2775 #else
2776     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2777 #endif // AARCH64
2778     // Convert array[h].match to native byte-ordering before compare
2779     __ byteswap_u32(val, temp1, temp2);
2780     __ cmp_32(key, val);
2781     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2782     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2783     // while (i+1 < j)
2784     __ bind(entry);
2785     __ add(temp1, i, 1);                             // i+1
2786     __ cmp(temp1, j);                                // i+1 < j
2787     __ b(loop, lt);
2788   }
2789 
2790   // end of binary search, result index is i (must check again!)
2791   Label default_case;
2792   // Convert array[i].match to native byte-ordering before compare
2793 #ifdef AARCH64
2794   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2795   __ ldr_s32(val, Address(temp1));
2796 #else
2797   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2798 #endif // AARCH64
2799   __ byteswap_u32(val, temp1, temp2);
2800   __ cmp_32(key, val);
2801   __ b(default_case, ne);
2802 
2803   // entry found
2804   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2805   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2806   __ profile_switch_case(R0, i, R1, i);
2807   __ byteswap_u32(offset, temp1, temp2);
2808 #ifdef AARCH64
2809   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2810   __ ldrb(R3_bytecode, Address(Rbcp));
2811 #else
2812   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2813 #endif // AARCH64
2814   __ dispatch_only(vtos);
2815 
2816   // default case
2817   __ bind(default_case);
2818   __ profile_switch_default(R0);
2819   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2820   __ byteswap_u32(offset, temp1, temp2);
2821 #ifdef AARCH64
2822   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2823   __ ldrb(R3_bytecode, Address(Rbcp));
2824 #else
2825   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2826 #endif // AARCH64
2827   __ dispatch_only(vtos);
2828 }
2829 
2830 
2831 void TemplateTable::_return(TosState state) {
2832   transition(state, state);
2833   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2834 
2835   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2836     Label skip_register_finalizer;
2837     assert(state == vtos, "only valid state");
2838     __ ldr(R1, aaddress(0));
2839     __ load_klass(Rtemp, R1);
2840     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2841     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2842 
2843     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2844 
2845     __ bind(skip_register_finalizer);
2846   }
2847 
2848   // Narrow result if state is itos but result type is smaller.
2849   // Need to narrow in the return bytecode rather than in generate_return_entry
2850   // since compiled code callers expect the result to already be narrowed.
2851   if (state == itos) {
2852     __ narrow(R0_tos);
2853   }
2854   __ remove_activation(state, LR);
2855 
2856   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2857 
2858 #ifndef AARCH64
2859   // According to interpreter calling conventions, result is returned in R0/R1,
2860   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2861   // This conversion should be done after remove_activation, as it uses
2862   // push(state) & pop(state) to preserve return value.
2863   __ convert_tos_to_retval(state);
2864 #endif // !AARCH64
2865 
2866   __ ret();
2867 
2868   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2869   __ nop();
2870 }
2871 
2872 
2873 // ----------------------------------------------------------------------------
2874 // Volatile variables demand their effects be made known to all CPU's in
2875 // order.  Store buffers on most chips allow reads & writes to reorder; the
2876 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2877 // memory barrier (i.e., it's not sufficient that the interpreter does not
2878 // reorder volatile references, the hardware also must not reorder them).
2879 //
2880 // According to the new Java Memory Model (JMM):
2881 // (1) All volatiles are serialized with respect to each other.
2882 // ALSO reads & writes act as acquire & release, so:
2883 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2884 // the read float up to before the read.  It's OK for non-volatile memory refs
2885 // that happen before the volatile read to float down below it.
2886 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2887 // that happen BEFORE the write float down to after the write.  It's OK for
2888 // non-volatile memory refs that happen after the volatile write to float up
2889 // before it.
2890 //
2891 // We only put in barriers around volatile refs (they are expensive), not
2892 // _between_ memory refs (that would require us to track the flavor of the
2893 // previous memory refs).  Requirements (2) and (3) require some barriers
2894 // before volatile stores and after volatile loads.  These nearly cover
2895 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2896 // case is placed after volatile-stores although it could just as well go
2897 // before volatile-loads.
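     // In Membar_mask_bits terms this policy amounts to, roughly: LoadLoad|LoadStore
     // after a volatile load (requirement 2), StoreStore|LoadStore before a volatile
     // store (requirement 3), and StoreLoad after a volatile store to cover the
     // remaining store-load case of requirement (1).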
2898 // TODO-AARCH64: consider removing extra unused parameters
2899 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2900                                      Register tmp,
2901                                      bool preserve_flags,
2902                                      Register load_tgt) {
2903 #ifdef AARCH64
2904   __ membar(order_constraint);
2905 #else
2906   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2907 #endif
2908 }
2909 
2910 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2911 void TemplateTable::resolve_cache_and_index(int byte_no,
2912                                             Register Rcache,
2913                                             Register Rindex,
2914                                             size_t index_size) {
2915   assert_different_registers(Rcache, Rindex, Rtemp);
2916 
2917   Label resolved;
2918   Bytecodes::Code code = bytecode();
2919   switch (code) {
2920   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2921   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
       default: break;
2922   }
2923 
2924   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2925   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2926   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2927   __ b(resolved, eq);
2928 
2929   // resolve first time through
2930   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2931   __ mov(R1, code);
2932   __ call_VM(noreg, entry, R1);
2933   // Update registers with resolved info
2934   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2935   __ bind(resolved);
2936 }
2937 
2938 
2939 // The Rcache and Rindex registers must be set before the call
2940 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2941                                               Register Rindex,
2942                                               Register Roffset,
2943                                               Register Rflags,
2944                                               Register Robj,
2945                                               bool is_static = false) {
2946 
2947   assert_different_registers(Rcache, Rindex, Rtemp);
2948   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2949 
2950   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2951 
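       // For a field entry, f2 holds the field offset, f1 holds the field holder
       // Klass* (used below to reach the java mirror for static fields), and flags
       // hold the tos state plus attribute bits such as is_volatile.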
2952   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2953 
2954   // Field offset
2955   __ ldr(Roffset, Address(Rtemp,
2956            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2957 
2958   // Flags
2959   __ ldr_u32(Rflags, Address(Rtemp,
2960            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2961 
2962   if (is_static) {
2963     __ ldr(Robj, Address(Rtemp,
2964              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2965     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2966     __ ldr(Robj, Address(Robj, mirror_offset));
2967     __ resolve_oop_handle(Robj);
2968   }
2969 }
2970 
2971 
2972 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2973 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2974                                                Register method,
2975                                                Register itable_index,
2976                                                Register flags,
2977                                                bool is_invokevirtual,
2978                                                bool is_invokevfinal/*unused*/,
2979                                                bool is_invokedynamic) {
2980   // setup registers
2981   const Register cache = R2_tmp;
2982   const Register index = R3_tmp;
2983   const Register temp_reg = Rtemp;
2984   assert_different_registers(cache, index, temp_reg);
2985   assert_different_registers(method, itable_index, temp_reg);
2986 
2987   // determine constant pool cache field offsets
2988   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2989   const int method_offset = in_bytes(
2990     ConstantPoolCache::base_offset() +
2991       ((byte_no == f2_byte)
2992        ? ConstantPoolCacheEntry::f2_offset()
2993        : ConstantPoolCacheEntry::f1_offset()
2994       )
2995     );
2996   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2997                                     ConstantPoolCacheEntry::flags_offset());
2998   // access constant pool cache fields
2999   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
3000                                     ConstantPoolCacheEntry::f2_offset());
3001 
3002   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3003   resolve_cache_and_index(byte_no, cache, index, index_size);
3004   __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3005   __ ldr(method, Address(temp_reg, method_offset));
3006 
3007   if (itable_index != noreg) {
3008     __ ldr(itable_index, Address(temp_reg, index_offset));
3009   }
3010   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3011 }
3012 
3013 
3014 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
3015 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3016 // except cache and index registers which are preserved.
3017 void TemplateTable::jvmti_post_field_access(Register Rcache,
3018                                             Register Rindex,
3019                                             bool is_static,
3020                                             bool has_tos) {
3021   assert_different_registers(Rcache, Rindex, Rtemp);
3022 
3023   if (__ can_post_field_access()) {
3024     // Check to see if a field access watch has been set before we take
3025     // the time to call into the VM.
3026 
3027     Label Lcontinue;
3028 
3029     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3030     __ cbz(Rtemp, Lcontinue);
3031 
3032     // cache entry pointer
3033     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3034     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3035     if (is_static) {
3036       __ mov(R1, 0);        // NULL object reference
3037     } else {
3038       __ pop(atos);         // Get the object
3039       __ mov(R1, R0_tos);
3040       __ verify_oop(R1);
3041       __ push(atos);        // Restore stack state
3042     }
3043     // R1: object pointer or NULL
3044     // R2: cache entry pointer
3045     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3046                R1, R2);
3047     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3048 
3049     __ bind(Lcontinue);
3050   }
3051 }
3052 
3053 
3054 void TemplateTable::pop_and_check_object(Register r) {
3055   __ pop_ptr(r);
3056   __ null_check(r, Rtemp);  // for field access must check obj.
3057   __ verify_oop(r);
3058 }
3059 
3060 
3061 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3062   transition(vtos, vtos);
3063 
3064   const Register Roffset  = R2_tmp;
3065   const Register Robj     = R3_tmp;
3066   const Register Rcache   = R4_tmp;
3067   const Register Rflagsav = Rtmp_save0;  // R4/R19
3068   const Register Rindex   = R5_tmp;
3069   const Register Rflags   = R5_tmp;
3070 
3071   const bool gen_volatile_check = os::is_MP();
3072 
3073   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3074   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3075   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3076 
3077   if (gen_volatile_check) {
3078     __ mov(Rflagsav, Rflags);
3079   }
3080 
3081   if (!is_static) pop_and_check_object(Robj);
3082 
3083   Label Done, Lint, Ltable, shouldNotReachHere;
3084   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3085 
3086   // compute type
3087   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3088   // Make sure we don't need to mask flags after the above shift
3089   ConstantPoolCacheEntry::verify_tos_state_shift();
3090 
3091   // There are actually two versions of implementation of getfield/getstatic:
3092   //
3093   // 32-bit ARM:
3094   // 1) Table switch using add(PC,...) instruction (fast_version)
3095   // 2) Table switch using ldr(PC,...) instruction
3096   //
3097   // AArch64:
3098   // 1) Table switch using adr/add/br instructions (fast_version)
3099   // 2) Table switch using adr/ldr/br instructions
3100   //
3101   // The first version requires a fixed-size code block for each case and
3102   // cannot be used when RewriteBytecodes, VerifyOops or
3103   // VerifyInterpreterStackTop are enabled.
3104 
3105   // Size of fixed size code block for fast_version
3106   const int log_max_block_size = 2;
3107   const int max_block_size = 1 << log_max_block_size;
3108 
3109   // Decide if fast version is enabled
3110   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3111 
3112   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3113   // atos requires additional processing in slow version.
3114   // On AArch64 atos and itos cannot be merged.
3115   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3116 
3117   assert(number_of_states == 10, "number of tos states should be equal to 10");
3118 
3119   __ cmp(Rflags, itos);
3120 #ifdef AARCH64
3121   __ b(Lint, eq);
3122 
3123   if(fast_version) {
3124     __ adr(Rtemp, Lbtos);
3125     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3126     __ br(Rtemp);
3127   } else {
3128     __ adr(Rtemp, Ltable);
3129     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3130     __ br(Rtemp);
3131   }
3132 #else
3133   if(atos_merged_with_itos) {
3134     __ cmp(Rflags, atos, ne);
3135   }
3136 
3137   // table switch by type
3138   if(fast_version) {
3139     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3140   } else {
3141     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3142   }
3143 
3144   // jump to itos/atos case
3145   __ b(Lint);
3146 #endif // AARCH64
3147 
3148   // table with addresses for slow version
3149   if (fast_version) {
3150     // nothing to do
3151   } else  {
3152     AARCH64_ONLY(__ align(wordSize));
3153     __ bind(Ltable);
3154     __ emit_address(Lbtos);
3155     __ emit_address(Lztos);
3156     __ emit_address(Lctos);
3157     __ emit_address(Lstos);
3158     __ emit_address(Litos);
3159     __ emit_address(Lltos);
3160     __ emit_address(Lftos);
3161     __ emit_address(Ldtos);
3162     __ emit_address(Latos);
3163   }
3164 
3165 #ifdef ASSERT
3166   int seq = 0;
3167 #endif
3168   // btos
3169   {
3170     assert(btos == seq++, "btos has unexpected value");
3171     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3172     __ bind(Lbtos);
3173     __ ldrsb(R0_tos, Address(Robj, Roffset));
3174     __ push(btos);
3175     // Rewrite bytecode to be faster
3176     if (!is_static && rc == may_rewrite) {
3177       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3178     }
3179     __ b(Done);
3180   }
3181 
3182   // ztos (same as btos for getfield)
3183   {
3184     assert(ztos == seq++, "ztos has unexpected value");
3185     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3186     __ bind(Lztos);
3187     __ ldrsb(R0_tos, Address(Robj, Roffset));
3188     __ push(ztos);
3189     // Rewrite bytecode to be faster (use btos fast getfield)
3190     if (!is_static && rc == may_rewrite) {
3191       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3192     }
3193     __ b(Done);
3194   }
3195 
3196   // ctos
3197   {
3198     assert(ctos == seq++, "ctos has unexpected value");
3199     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3200     __ bind(Lctos);
3201     __ ldrh(R0_tos, Address(Robj, Roffset));
3202     __ push(ctos);
3203     if (!is_static && rc == may_rewrite) {
3204       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3205     }
3206     __ b(Done);
3207   }
3208 
3209   // stos
3210   {
3211     assert(stos == seq++, "stos has unexpected value");
3212     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3213     __ bind(Lstos);
3214     __ ldrsh(R0_tos, Address(Robj, Roffset));
3215     __ push(stos);
3216     if (!is_static && rc == may_rewrite) {
3217       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3218     }
3219     __ b(Done);
3220   }
3221 
3222   // itos
3223   {
3224     assert(itos == seq++, "itos has unexpected value");
3225     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3226     __ bind(Litos);
3227     __ b(shouldNotReachHere);
3228   }
3229 
3230   // ltos
3231   {
3232     assert(ltos == seq++, "ltos has unexpected value");
3233     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3234     __ bind(Lltos);
3235 #ifdef AARCH64
3236     __ ldr(R0_tos, Address(Robj, Roffset));
3237 #else
3238     __ add(Roffset, Robj, Roffset);
3239     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3240 #endif // AARCH64
3241     __ push(ltos);
3242     if (!is_static && rc == may_rewrite) {
3243       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3244     }
3245     __ b(Done);
3246   }
3247 
3248   // ftos
3249   {
3250     assert(ftos == seq++, "ftos has unexpected value");
3251     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3252     __ bind(Lftos);
3253     // floats and ints are placed on the stack in the same way, so
3254     // we can use push(itos) to transfer the value without using VFP
3255     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3256     __ push(itos);
3257     if (!is_static && rc == may_rewrite) {
3258       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3259     }
3260     __ b(Done);
3261   }
3262 
3263   // dtos
3264   {
3265     assert(dtos == seq++, "dtos has unexpected value");
3266     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3267     __ bind(Ldtos);
3268     // doubles and longs are placed on the stack in the same way, so
3269     // we can use push(ltos) to transfer the value without using VFP
3270 #ifdef AARCH64
3271     __ ldr(R0_tos, Address(Robj, Roffset));
3272 #else
3273     __ add(Rtemp, Robj, Roffset);
3274     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3275 #endif // AARCH64
3276     __ push(ltos);
3277     if (!is_static && rc == may_rewrite) {
3278       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3279     }
3280     __ b(Done);
3281   }
3282 
3283   // atos
3284   {
3285     assert(atos == seq++, "atos has unexpected value");
3286 
3287     // atos case for AArch64 and slow version on 32-bit ARM
3288     if (!atos_merged_with_itos) {
3289       __ bind(Latos);
3290       __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3291       __ push(atos);
3292       // Rewrite bytecode to be faster
3293       if (!is_static && rc == may_rewrite) {
3294         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3295       }
3296       __ b(Done);
3297     }
3298   }
3299 
3300   assert(vtos == seq++, "vtos has unexpected value");
3301 
3302   __ bind(shouldNotReachHere);
3303   __ should_not_reach_here();
3304 
3305   // The itos and atos cases are frequent, so it makes sense to move them out of the table switch.
3306   // The atos case can be merged with the itos case (and thus also moved out of the table switch) on 32-bit ARM, fast version only.
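       // (The merge works on 32-bit ARM because an oop there is a plain 32-bit word, so the
       //  ldr_s32 at Lint loads exactly the bits that an atos load would; the merge is limited
       //  to the fast version, where VerifyOops is known to be off.)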
3307 
3308   __ bind(Lint);
3309   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3310   __ push(itos);
3311   // Rewrite bytecode to be faster
3312   if (!is_static && rc == may_rewrite) {
3313     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3314   }
3315 
3316   __ bind(Done);
3317 
3318   if (gen_volatile_check) {
3319     // Check for volatile field
3320     Label notVolatile;
3321     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3322 
3323     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3324 
3325     __ bind(notVolatile);
3326   }
3327 
3328 }
3329 
3330 void TemplateTable::getfield(int byte_no) {
3331   getfield_or_static(byte_no, false);
3332 }
3333 
3334 void TemplateTable::nofast_getfield(int byte_no) {
3335   getfield_or_static(byte_no, false, may_not_rewrite);
3336 }
3337 
3338 void TemplateTable::getstatic(int byte_no) {
3339   getfield_or_static(byte_no, true);
3340 }
3341 
3342 
3343 // The cache and index registers are expected to be set before the call, and must not be R1 or Rtemp.
3344 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3345 // except cache and index registers which are preserved.
3346 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3347   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3348   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3349 
3350   if (__ can_post_field_modification()) {
3351     // Check to see if a field modification watch has been set before we take
3352     // the time to call into the VM.
3353     Label Lcontinue;
3354 
3355     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3356     __ cbz(Rtemp, Lcontinue);
3357 
3358     if (is_static) {
3359       // Life is simple.  Null out the object pointer.
3360       __ mov(R1, 0);
3361     } else {
3362       // Life is harder. The stack holds the value on top, followed by the object.
3363       // We don't know the size of the value, though; it could be one or two words
3364       // depending on its type. As a result, we must find the type to determine where
3365       // the object is.
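           // Expression stack layout at this point (slot 0 is the top of stack):
           //   one-word value:   slot 0 = value,     slot 1 = object
           //   two-word value:   slots 0..1 = value, slot 2 = object
           // hence the expr_offset_in_bytes(1) / expr_offset_in_bytes(2) loads below.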
3366 
3367       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3368       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3369 
3370       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3371       // Make sure we don't need to mask Rtemp after the above shift
3372       ConstantPoolCacheEntry::verify_tos_state_shift();
3373 
3374       __ cmp(Rtemp, ltos);
3375       __ cond_cmp(Rtemp, dtos, ne);
3376 #ifdef AARCH64
3377       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3378       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3379       __ mov(R1, Rtemp, eq);
3380       __ ldr(R1, Address(Rstack_top, R1));
3381 #else
3382       // two word value (ltos/dtos)
3383       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3384 
3385       // one word value (not ltos, dtos)
3386       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3387 #endif // AARCH64
3388     }
3389 
3390     // cache entry pointer
3391     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3392     __ add(R2, R2, in_bytes(cp_base_offset));
3393 
3394     // object (tos)
3395     __ mov(R3, Rstack_top);
3396 
3397     // R1: object pointer set up above (NULL if static)
3398     // R2: cache entry pointer
3399     // R3: value object on the stack
3400     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3401                R1, R2, R3);
3402     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3403 
3404     __ bind(Lcontinue);
3405   }
3406 }
3407 
3408 
3409 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3410   transition(vtos, vtos);
3411 
3412   const Register Roffset  = R2_tmp;
3413   const Register Robj     = R3_tmp;
3414   const Register Rcache   = R4_tmp;
3415   const Register Rflagsav = Rtmp_save0;  // R4/R19
3416   const Register Rindex   = R5_tmp;
3417   const Register Rflags   = R5_tmp;
3418 
3419   const bool gen_volatile_check = os::is_MP();
3420 
3421   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3422   jvmti_post_field_mod(Rcache, Rindex, is_static);
3423   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3424 
3425   if (gen_volatile_check) {
3426     // Check for volatile field
3427     Label notVolatile;
3428     __ mov(Rflagsav, Rflags);
3429     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3430 
3431     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3432 
3433     __ bind(notVolatile);
3434   }
3435 
3436   Label Done, Lint, shouldNotReachHere;
3437   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3438 
3439   // compute type
3440   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3441   // Make sure we don't need to mask flags after the above shift
3442   ConstantPoolCacheEntry::verify_tos_state_shift();
3443 
3444   // There are actually two versions of implementation of putfield/putstatic:
3445   //
3446   // 32-bit ARM:
3447   // 1) Table switch using add(PC,...) instruction (fast_version)
3448   // 2) Table switch using ldr(PC,...) instruction
3449   //
3450   // AArch64:
3451   // 1) Table switch using adr/add/br instructions (fast_version)
3452   // 2) Table switch using adr/ldr/br instructions
3453   //
3454   // The first version requires a fixed-size code block for each case and
3455   // cannot be used when bytecode rewriting is possible or when VerifyOops
3456   // or ZapHighNonSignificantBits is enabled (see fast_version below).
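       // Rough shape of the fast version dispatch on 32-bit ARM (for illustration only):
       //   add PC, PC, Rflags, lsl #(log_max_block_size + LogInstructionSize)
       // Each tos state then lands at the start of its own block of exactly max_block_size
       // instructions, padded by FixedSizeCodeBlock below. The slow version instead loads
       // the target address from the Ltable array of emit_address() entries.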
3457 
3458   // Size of fixed size code block for fast_version (in instructions)
3459   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3460   const int max_block_size = 1 << log_max_block_size;
3461 
3462   // Decide if fast version is enabled
3463   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3464 
3465   assert(number_of_states == 10, "number of tos states should be equal to 10");
3466 
3467   // itos case is frequent and is moved outside the table switch
3468   __ cmp(Rflags, itos);
3469 
3470 #ifdef AARCH64
3471   __ b(Lint, eq);
3472 
3473   if (fast_version) {
3474     __ adr(Rtemp, Lbtos);
3475     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3476     __ br(Rtemp);
3477   } else {
3478     __ adr(Rtemp, Ltable);
3479     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3480     __ br(Rtemp);
3481   }
3482 #else
3483   // table switch by type
3484   if (fast_version) {
3485     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3486   } else  {
3487     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3488   }
3489 
3490   // jump to itos case
3491   __ b(Lint);
3492 #endif // AARCH64
3493 
3494   // table with addresses for slow version
3495   if (fast_version) {
3496     // nothing to do
3497   } else  {
3498     AARCH64_ONLY(__ align(wordSize));
3499     __ bind(Ltable);
3500     __ emit_address(Lbtos);
3501     __ emit_address(Lztos);
3502     __ emit_address(Lctos);
3503     __ emit_address(Lstos);
3504     __ emit_address(Litos);
3505     __ emit_address(Lltos);
3506     __ emit_address(Lftos);
3507     __ emit_address(Ldtos);
3508     __ emit_address(Latos);
3509   }
3510 
3511 #ifdef ASSERT
3512   int seq = 0;
3513 #endif
3514   // btos
3515   {
3516     assert(btos == seq++, "btos has unexpected value");
3517     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3518     __ bind(Lbtos);
3519     __ pop(btos);
3520     if (!is_static) pop_and_check_object(Robj);
3521     __ strb(R0_tos, Address(Robj, Roffset));
3522     if (!is_static && rc == may_rewrite) {
3523       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3524     }
3525     __ b(Done);
3526   }
3527 
3528   // ztos
3529   {
3530     assert(ztos == seq++, "ztos has unexpected value");
3531     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3532     __ bind(Lztos);
3533     __ pop(ztos);
3534     if (!is_static) pop_and_check_object(Robj);
3535     __ and_32(R0_tos, R0_tos, 1);
3536     __ strb(R0_tos, Address(Robj, Roffset));
3537     if (!is_static && rc == may_rewrite) {
3538       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3539     }
3540     __ b(Done);
3541   }
3542 
3543   // ctos
3544   {
3545     assert(ctos == seq++, "ctos has unexpected value");
3546     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3547     __ bind(Lctos);
3548     __ pop(ctos);
3549     if (!is_static) pop_and_check_object(Robj);
3550     __ strh(R0_tos, Address(Robj, Roffset));
3551     if (!is_static && rc == may_rewrite) {
3552       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3553     }
3554     __ b(Done);
3555   }
3556 
3557   // stos
3558   {
3559     assert(stos == seq++, "stos has unexpected value");
3560     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3561     __ bind(Lstos);
3562     __ pop(stos);
3563     if (!is_static) pop_and_check_object(Robj);
3564     __ strh(R0_tos, Address(Robj, Roffset));
3565     if (!is_static && rc == may_rewrite) {
3566       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3567     }
3568     __ b(Done);
3569   }
3570 
3571   // itos
3572   {
3573     assert(itos == seq++, "itos has unexpected value");
3574     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3575     __ bind(Litos);
3576     __ b(shouldNotReachHere);
3577   }
3578 
3579   // ltos
3580   {
3581     assert(ltos == seq++, "ltos has unexpected value");
3582     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3583     __ bind(Lltos);
3584     __ pop(ltos);
3585     if (!is_static) pop_and_check_object(Robj);
3586 #ifdef AARCH64
3587     __ str(R0_tos, Address(Robj, Roffset));
3588 #else
3589     __ add(Roffset, Robj, Roffset);
3590     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3591 #endif // AARCH64
3592     if (!is_static && rc == may_rewrite) {
3593       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3594     }
3595     __ b(Done);
3596   }
3597 
3598   // ftos
3599   {
3600     assert(ftos == seq++, "ftos has unexpected value");
3601     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3602     __ bind(Lftos);
3603     // floats and ints are placed on the stack in the same way, so
3604     // we can use pop(itos) to transfer the value without using VFP
3605     __ pop(itos);
3606     if (!is_static) pop_and_check_object(Robj);
3607     __ str_32(R0_tos, Address(Robj, Roffset));
3608     if (!is_static && rc == may_rewrite) {
3609       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3610     }
3611     __ b(Done);
3612   }
3613 
3614   // dtos
3615   {
3616     assert(dtos == seq++, "dtos has unexpected value");
3617     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3618     __ bind(Ldtos);
3619     // doubles and longs are placed on the stack in the same way, so
3620     // we can use pop(ltos) to transfer the value without using VFP
3621     __ pop(ltos);
3622     if (!is_static) pop_and_check_object(Robj);
3623 #ifdef AARCH64
3624     __ str(R0_tos, Address(Robj, Roffset));
3625 #else
3626     __ add(Rtemp, Robj, Roffset);
3627     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3628 #endif // AARCH64
3629     if (!is_static && rc == may_rewrite) {
3630       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3631     }
3632     __ b(Done);
3633   }
3634 
3635   // atos
3636   {
3637     assert(atos == seq++, "atos has unexpected value");
3638     __ bind(Latos);
3639     __ pop(atos);
3640     if (!is_static) pop_and_check_object(Robj);
3641     // Store into the field
3642     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false);
3643     if (!is_static && rc == may_rewrite) {
3644       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3645     }
3646     __ b(Done);
3647   }
3648 
3649   __ bind(shouldNotReachHere);
3650   __ should_not_reach_here();
3651 
3652   // itos case is frequent and is moved outside the table switch
3653   __ bind(Lint);
3654   __ pop(itos);
3655   if (!is_static) pop_and_check_object(Robj);
3656   __ str_32(R0_tos, Address(Robj, Roffset));
3657   if (!is_static && rc == may_rewrite) {
3658     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3659   }
3660 
3661   __ bind(Done);
3662 
3663   if (gen_volatile_check) {
3664     Label notVolatile;
3665     if (is_static) {
3666       // Just check for volatile. Memory barrier for static final field
3667       // is handled by class initialization.
3668       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3669       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3670       __ bind(notVolatile);
3671     } else {
3672       // Check for volatile field and final field
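           // Barrier choice below (sketch of the intended semantics):
           //   volatile write -> StoreLoad, so the store cannot be reordered with a later volatile read;
           //   final write    -> StoreStore, so the stored fields are visible before the newly
           //                     constructed object reference can be published.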
3673       Label skipMembar;
3674 
3675       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3676                        1 << ConstantPoolCacheEntry::is_final_shift);
3677       __ b(skipMembar, eq);
3678 
3679       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3680 
3681       // StoreLoad barrier after volatile field write
3682       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3683       __ b(skipMembar);
3684 
3685       // StoreStore barrier after final field write
3686       __ bind(notVolatile);
3687       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3688 
3689       __ bind(skipMembar);
3690     }
3691   }
3692 
3693 }
3694 
3695 void TemplateTable::putfield(int byte_no) {
3696   putfield_or_static(byte_no, false);
3697 }
3698 
3699 void TemplateTable::nofast_putfield(int byte_no) {
3700   putfield_or_static(byte_no, false, may_not_rewrite);
3701 }
3702 
3703 void TemplateTable::putstatic(int byte_no) {
3704   putfield_or_static(byte_no, true);
3705 }
3706 
3707 
3708 void TemplateTable::jvmti_post_fast_field_mod() {
3709   // This version of jvmti_post_fast_field_mod() is not used on ARM
3710   Unimplemented();
3711 }
3712 
3713 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3714 // but preserves tosca with the given state.
3715 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3716   if (__ can_post_field_modification()) {
3717     // Check to see if a field modification watch has been set before we take
3718     // the time to call into the VM.
3719     Label done;
3720 
3721     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3722     __ cbz(R2, done);
3723 
3724     __ pop_ptr(R3);               // copy the object pointer from tos
3725     __ verify_oop(R3);
3726     __ push_ptr(R3);              // put the object pointer back on tos
3727 
3728     __ push(state);               // save value on the stack
3729 
3730     // access constant pool cache entry
3731     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3732 
3733     __ mov(R1, R3);
3734     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3735     __ mov(R3, Rstack_top); // put tos addr into R3
3736 
3737     // R1: object pointer copied above
3738     // R2: cache entry pointer
3739     // R3: jvalue object on the stack
3740     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3741 
3742     __ pop(state);                // restore value
3743 
3744     __ bind(done);
3745   }
3746 }
3747 
3748 
3749 void TemplateTable::fast_storefield(TosState state) {
3750   transition(state, vtos);
3751 
3752   ByteSize base = ConstantPoolCache::base_offset();
3753 
3754   jvmti_post_fast_field_mod(state);
3755 
3756   const Register Rcache  = R2_tmp;
3757   const Register Rindex  = R3_tmp;
3758   const Register Roffset = R3_tmp;
3759   const Register Rflags  = Rtmp_save0; // R4/R19
3760   const Register Robj    = R5_tmp;
3761 
3762   const bool gen_volatile_check = os::is_MP();
3763 
3764   // access constant pool cache
3765   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3766 
3767   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3768 
3769   if (gen_volatile_check) {
3770     // load flags to test volatile
3771     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3772   }
3773 
3774   // replace index with field offset from cache entry
3775   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3776 
3777   if (gen_volatile_check) {
3778     // Check for volatile store
3779     Label notVolatile;
3780     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3781 
3782     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3783     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3784 
3785     __ bind(notVolatile);
3786   }
3787 
3788   // Get object from stack
3789   pop_and_check_object(Robj);
3790 
3791   // access field
3792   switch (bytecode()) {
3793     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3794                                      // fall through
3795     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3796     case Bytecodes::_fast_sputfield: // fall through
3797     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3798     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3799 #ifdef AARCH64
3800     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3801     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3802     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3803 #else
3804     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3805                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3806 
3807 #ifdef __SOFTFP__
3808     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3809     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3810                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3811 #else
3812     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3813                                      __ fsts(S0_tos, Address(Robj));          break;
3814     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3815                                      __ fstd(D0_tos, Address(Robj));          break;
3816 #endif // __SOFTFP__
3817 #endif // AARCH64
3818 
3819     case Bytecodes::_fast_aputfield:
3820       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false);
3821       break;
3822 
3823     default:
3824       ShouldNotReachHere();
3825   }
3826 
3827   if (gen_volatile_check) {
3828     Label notVolatile;
3829     Label skipMembar;
3830     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3831                    1 << ConstantPoolCacheEntry::is_final_shift);
3832     __ b(skipMembar, eq);
3833 
3834     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3835 
3836     // StoreLoad barrier after volatile field write
3837     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3838     __ b(skipMembar);
3839 
3840     // StoreStore barrier after final field write
3841     __ bind(notVolatile);
3842     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3843 
3844     __ bind(skipMembar);
3845   }
3846 }
3847 
3848 
3849 void TemplateTable::fast_accessfield(TosState state) {
3850   transition(atos, state);
3851 
3852   // do the JVMTI work here to avoid disturbing the register state below
3853   if (__ can_post_field_access()) {
3854     // Check to see if a field access watch has been set before we take
3855     // the time to call into the VM.
3856     Label done;
3857     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3858     __ cbz(R2, done);
3859     // access constant pool cache entry
3860     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3861     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3862     __ verify_oop(R0_tos);
3863     __ mov(R1, R0_tos);
3864     // R1: object pointer copied above
3865     // R2: cache entry pointer
3866     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3867     __ pop_ptr(R0_tos);   // restore object pointer
3868 
3869     __ bind(done);
3870   }
3871 
3872   const Register Robj    = R0_tos;
3873   const Register Rcache  = R2_tmp;
3874   const Register Rflags  = R2_tmp;
3875   const Register Rindex  = R3_tmp;
3876   const Register Roffset = R3_tmp;
3877 
3878   const bool gen_volatile_check = os::is_MP();
3879 
3880   // access constant pool cache
3881   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3882   // replace index with field offset from cache entry
3883   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3884   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3885 
3886   if (gen_volatile_check) {
3887     // load flags to test volatile
3888     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3889   }
3890 
3891   __ verify_oop(Robj);
3892   __ null_check(Robj, Rtemp);
3893 
3894   // access field
3895   switch (bytecode()) {
3896     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3897     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3898     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3899     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3900 #ifdef AARCH64
3901     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3902     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3903     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3904 #else
3905     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3906                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3907 #ifdef __SOFTFP__
3908     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3909     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3910                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3911 #else
3912     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3913     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3914 #endif // __SOFTFP__
3915 #endif // AARCH64
3916     case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3917     default:
3918       ShouldNotReachHere();
3919   }
3920 
3921   if (gen_volatile_check) {
3922     // Check for volatile load
3923     Label notVolatile;
3924     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3925 
3926     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
3927     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3928 
3929     __ bind(notVolatile);
3930   }
3931 }
3932 
3933 
3934 void TemplateTable::fast_xaccess(TosState state) {
3935   transition(vtos, state);
3936 
3937   const Register Robj = R1_tmp;
3938   const Register Rcache = R2_tmp;
3939   const Register Rindex = R3_tmp;
3940   const Register Roffset = R3_tmp;
3941   const Register Rflags = R4_tmp;
3942   Label done;
3943 
3944   // get receiver
3945   __ ldr(Robj, aaddress(0));
3946 
3947   // access constant pool cache
3948   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3949   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3950   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3951 
3952   const bool gen_volatile_check = os::is_MP();
3953 
3954   if (gen_volatile_check) {
3955     // load flags to test volatile
3956     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3957   }
3958 
3959   // make sure exception is reported in correct bcp range (getfield is next instruction)
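       // (fast_xaccess implements the rewritten aload_0 + fast getfield pair: the cp cache index
       //  read at bcp+2 above belongs to the getfield, so the bci is temporarily bumped to point
       //  at that getfield while the receiver is null-checked.)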
3960   __ add(Rbcp, Rbcp, 1);
3961   __ null_check(Robj, Rtemp);
3962   __ sub(Rbcp, Rbcp, 1);
3963 
3964 #ifdef AARCH64
3965   if (gen_volatile_check) {
3966     Label notVolatile;
3967     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3968 
3969     __ add(Rtemp, Robj, Roffset);
3970 
3971     if (state == itos) {
3972       __ ldar_w(R0_tos, Rtemp);
3973     } else if (state == atos) {
3974       if (UseCompressedOops) {
3975         __ ldar_w(R0_tos, Rtemp);
3976         __ decode_heap_oop(R0_tos);
3977       } else {
3978         __ ldar(R0_tos, Rtemp);
3979       }
3980       __ verify_oop(R0_tos);
3981     } else if (state == ftos) {
3982       __ ldar_w(R0_tos, Rtemp);
3983       __ fmov_sw(S0_tos, R0_tos);
3984     } else {
3985       ShouldNotReachHere();
3986     }
3987     __ b(done);
3988 
3989     __ bind(notVolatile);
3990   }
3991 #endif // AARCH64
3992 
3993   if (state == itos) {
3994     __ ldr_s32(R0_tos, Address(Robj, Roffset));
3995   } else if (state == atos) {
3996     __ load_heap_oop(R0_tos, Address(Robj, Roffset));
3997     __ verify_oop(R0_tos);
3998   } else if (state == ftos) {
3999 #ifdef AARCH64
4000     __ ldr_s(S0_tos, Address(Robj, Roffset));
4001 #else
4002 #ifdef __SOFTFP__
4003     __ ldr(R0_tos, Address(Robj, Roffset));
4004 #else
4005     __ add(Roffset, Robj, Roffset);
4006     __ flds(S0_tos, Address(Roffset));
4007 #endif // __SOFTFP__
4008 #endif // AARCH64
4009   } else {
4010     ShouldNotReachHere();
4011   }
4012 
4013 #ifndef AARCH64
4014   if (gen_volatile_check) {
4015     // Check for volatile load
4016     Label notVolatile;
4017     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4018 
4019     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4020 
4021     __ bind(notVolatile);
4022   }
4023 #endif // !AARCH64
4024 
4025   __ bind(done);
4026 }
4027 
4028 
4029 
4030 //----------------------------------------------------------------------------------------------------
4031 // Calls
4032 
4033 void TemplateTable::count_calls(Register method, Register temp) {
4034   // implemented elsewhere
4035   ShouldNotReachHere();
4036 }
4037 
4038 
4039 void TemplateTable::prepare_invoke(int byte_no,
4040                                    Register method,  // linked method (or i-klass)
4041                                    Register index,   // itable index, MethodType, etc.
4042                                    Register recv,    // if caller wants to see it
4043                                    Register flags    // if caller wants to test it
4044                                    ) {
4045   // determine flags
4046   const Bytecodes::Code code = bytecode();
4047   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4048   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4049   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4050   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4051   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4052   const bool load_receiver       = (recv != noreg);
4053   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4054   assert(recv  == noreg || recv  == R2, "");
4055   assert(flags == noreg || flags == R3, "");
4056 
4057   // setup registers & access constant pool cache
4058   if (recv  == noreg)  recv  = R2;
4059   if (flags == noreg)  flags = R3;
4060   const Register temp = Rtemp;
4061   const Register ret_type = R1_tmp;
4062   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4063 
4064   // save 'interpreter return address'
4065   __ save_bcp();
4066 
4067   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4068 
4069   // maybe push extra argument
4070   if (is_invokedynamic || is_invokehandle) {
4071     Label L_no_push;
4072     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4073     __ mov(temp, index);
4074     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4075     __ load_resolved_reference_at_index(index, temp);
4076     __ verify_oop(index);
4077     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4078     __ bind(L_no_push);
4079   }
4080 
4081   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4082   if (load_receiver) {
4083     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4084     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4085     __ ldr(recv, recv_addr);
4086     __ verify_oop(recv);
4087   }
4088 
4089   // compute return type
4090   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4091   // Make sure we don't need to mask flags after the above shift
4092   ConstantPoolCacheEntry::verify_tos_state_shift();
4093   // load return address
4094   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4095     __ mov_slow(temp, table);
4096     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4097   }
4098 }
4099 
4100 
4101 void TemplateTable::invokevirtual_helper(Register index,
4102                                          Register recv,
4103                                          Register flags) {
4104 
4105   const Register recv_klass = R2_tmp;
4106 
4107   assert_different_registers(index, recv, flags, Rtemp);
4108   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4109 
4110   // Test for an invoke of a final method
4111   Label notFinal;
4112   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4113 
4114   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4115 
4116   // do the call - the index is actually the method to call
4117 
4118   // It's final, need a null check here!
4119   __ null_check(recv, Rtemp);
4120 
4121   // profile this call
4122   __ profile_final_call(R0_tmp);
4123 
4124   __ jump_from_interpreted(Rmethod);
4125 
4126   __ bind(notFinal);
4127 
4128   // get receiver klass
4129   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4130   __ load_klass(recv_klass, recv);
4131 
4132   // profile this call
4133   __ profile_virtual_call(R0_tmp, recv_klass);
4134 
4135   // get target Method* & entry point
4136   const int base = in_bytes(Klass::vtable_start_offset());
4137   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
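       // i.e. Rmethod = *(recv_klass + vtable_start_offset + index * wordSize + method_offset_in_bytes)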
4138   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4139   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4140   __ jump_from_interpreted(Rmethod);
4141 }
4142 
4143 void TemplateTable::invokevirtual(int byte_no) {
4144   transition(vtos, vtos);
4145   assert(byte_no == f2_byte, "use this argument");
4146 
4147   const Register Rrecv  = R2_tmp;
4148   const Register Rflags = R3_tmp;
4149 
4150   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4151 
4152   // Rmethod: index
4153   // Rrecv:   receiver
4154   // Rflags:  flags
4155   // LR:      return address
4156 
4157   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4158 }
4159 
4160 
4161 void TemplateTable::invokespecial(int byte_no) {
4162   transition(vtos, vtos);
4163   assert(byte_no == f1_byte, "use this argument");
4164   const Register Rrecv  = R2_tmp;
4165   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4166   __ verify_oop(Rrecv);
4167   __ null_check(Rrecv, Rtemp);
4168   // do the call
4169   __ profile_call(Rrecv);
4170   __ jump_from_interpreted(Rmethod);
4171 }
4172 
4173 
4174 void TemplateTable::invokestatic(int byte_no) {
4175   transition(vtos, vtos);
4176   assert(byte_no == f1_byte, "use this argument");
4177   prepare_invoke(byte_no, Rmethod);
4178   // do the call
4179   __ profile_call(R2_tmp);
4180   __ jump_from_interpreted(Rmethod);
4181 }
4182 
4183 
4184 void TemplateTable::fast_invokevfinal(int byte_no) {
4185   transition(vtos, vtos);
4186   assert(byte_no == f2_byte, "use this argument");
4187   __ stop("fast_invokevfinal is not used on ARM");
4188 }
4189 
4190 
4191 void TemplateTable::invokeinterface(int byte_no) {
4192   transition(vtos, vtos);
4193   assert(byte_no == f1_byte, "use this argument");
4194 
4195   const Register Ritable = R1_tmp;
4196   const Register Rrecv   = R2_tmp;
4197   const Register Rinterf = R5_tmp;
4198   const Register Rindex  = R4_tmp;
4199   const Register Rflags  = R3_tmp;
4200   const Register Rklass  = R3_tmp;
4201 
4202   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4203 
4204   // Special case of invokeinterface called for a virtual method of
4205   // java.lang.Object.  See cpCache.cpp for details.
4206   // This code isn't produced by javac, but could be produced by
4207   // another compliant Java compiler.
4208   Label notMethod;
4209   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
4210 
4211   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4212   __ bind(notMethod);
4213 
4214   // Get receiver klass into Rklass - also a null check
4215   __ load_klass(Rklass, Rrecv);
4216 
4217   Label no_such_interface;
4218 
4219   // Receiver subtype check against REFC.
4220   __ lookup_interface_method(// inputs: rec. class, interface
4221                              Rklass, Rinterf, noreg,
4222                              // outputs:  scan temp. reg1, scan temp. reg2
4223                              noreg, Ritable, Rtemp,
4224                              no_such_interface);
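       // The first lookup (no itable index) only verifies that the receiver class implements the
       // interface referenced by the call site (REFC); the method itself is located by the second
       // lookup below, against the interface that actually declares it.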
4225 
4226   // profile this call
4227   __ profile_virtual_call(R0_tmp, Rklass);
4228 
4229   // Get declaring interface class from method
4230   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4231   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4232   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4233 
4234   // Get itable index from method
4235   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4236   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
4237   __ neg(Rindex, Rtemp);
4238 
4239   __ lookup_interface_method(// inputs: rec. class, interface
4240                              Rklass, Rinterf, Rindex,
4241                              // outputs:  scan temp. reg1, scan temp. reg2
4242                              Rmethod, Ritable, Rtemp,
4243                              no_such_interface);
4244 
4245   // Rmethod: Method* to call
4246 
4247   // Check for abstract method error
4248   // Note: This should be done more efficiently via a throw_abstract_method_error
4249   //       interpreter entry point and a conditional jump to it in case of a null
4250   //       method.
4251   { Label L;
4252     __ cbnz(Rmethod, L);
4253     // throw exception
4254     // note: must restore interpreter registers to canonical
4255     //       state for exception handling to work correctly!
4256     __ restore_method();
4257     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4258     // the call_VM checks for exception, so we should never return here.
4259     __ should_not_reach_here();
4260     __ bind(L);
4261   }
4262 
4263   // do the call
4264   __ jump_from_interpreted(Rmethod);
4265 
4266   // throw exception
4267   __ bind(no_such_interface);
4268   __ restore_method();
4269   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4270   // the call_VM checks for exception, so we should never return here.
4271   __ should_not_reach_here();
4272 }
4273 
4274 void TemplateTable::invokehandle(int byte_no) {
4275   transition(vtos, vtos);
4276 
4277   // TODO-AARCH64 review register usage
4278   const Register Rrecv  = R2_tmp;
4279   const Register Rmtype = R4_tmp;
4280   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4281 
4282   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4283   __ null_check(Rrecv, Rtemp);
4284 
4285   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4286   // Rmethod: MH.invokeExact_MT method (from f2)
4287 
4288   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4289 
4290   // do the call
4291   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4292   __ mov(Rmethod, R5_method);
4293   __ jump_from_interpreted(Rmethod);
4294 }
4295 
4296 void TemplateTable::invokedynamic(int byte_no) {
4297   transition(vtos, vtos);
4298 
4299   // TODO-AARCH64 review register usage
4300   const Register Rcallsite = R4_tmp;
4301   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4302 
4303   prepare_invoke(byte_no, R5_method, Rcallsite);
4304 
4305   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4306   // Rmethod:   MH.linkToCallSite method (from f2)
4307 
4308   // Note:  Rcallsite is already pushed by prepare_invoke
4309 
4310   if (ProfileInterpreter) {
4311     __ profile_call(R2_tmp);
4312   }
4313 
4314   // do the call
4315   __ mov(Rmethod, R5_method);
4316   __ jump_from_interpreted(Rmethod);
4317 }
4318 
4319 //----------------------------------------------------------------------------------------------------
4320 // Allocation
4321 
4322 void TemplateTable::_new() {
4323   transition(vtos, atos);
4324 
4325   const Register Robj   = R0_tos;
4326   const Register Rcpool = R1_tmp;
4327   const Register Rindex = R2_tmp;
4328   const Register Rtags  = R3_tmp;
4329   const Register Rsize  = R3_tmp;
4330 
4331   Register Rklass = R4_tmp;
4332   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4333   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4334 
4335   Label slow_case;
4336   Label done;
4337   Label initialize_header;
4338   Label initialize_object;  // including clearing the fields
4339 
4340   const bool allow_shared_alloc =
4341     Universe::heap()->supports_inline_contig_alloc();
4342 
4343   // Literals
4344   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4345 
4346   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4347   __ get_cpool_and_tags(Rcpool, Rtags);
4348 
4349   // Make sure the class we're about to instantiate has been resolved.
4350   // This is done before loading InstanceKlass to be consistent with the order
4351   // in which the constant pool is updated (see ConstantPool::klass_at_put).
4352   const int tags_offset = Array<u1>::base_offset_in_bytes();
4353   __ add(Rtemp, Rtags, Rindex);
4354 
4355 #ifdef AARCH64
4356   __ add(Rtemp, Rtemp, tags_offset);
4357   __ ldarb(Rtemp, Rtemp);
4358 #else
4359   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4360 
4361   // use Rklass as a scratch
4362   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4363 #endif // AARCH64
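       // The tag is read with acquire semantics (ldarb on AArch64, LoadLoad barrier on 32-bit ARM)
       // so that once it reads JVM_CONSTANT_Class, the resolved klass loaded below is the fully
       // published entry.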
4364 
4365   // get InstanceKlass
4366   __ cmp(Rtemp, JVM_CONSTANT_Class);
4367   __ b(slow_case, ne);
4368   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4369 
4370   // make sure klass is initialized & doesn't have finalizer
4371   // make sure klass is fully initialized
4372   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4373   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4374   __ b(slow_case, ne);
4375 
4376   // get instance_size in InstanceKlass (scaled to a count of bytes)
4377   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4378 
4379   // test to see if it has a finalizer or is malformed in some way
4380   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4381   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4382 
4383   // Allocate the instance:
4384   //  If TLAB is enabled:
4385   //    Try to allocate in the TLAB.
4386   //    If fails, go to the slow path.
4387   //  Else If inline contiguous allocations are enabled:
4388   //    Try to allocate in eden.
4389   //    If fails due to heap end, go to slow path.
4390   //
4391   //  If TLAB is enabled OR inline contiguous is enabled:
4392   //    Initialize the allocation.
4393   //    Exit.
4394   //
4395   //  Go to slow path.
4396   if (UseTLAB) {
4397     const Register Rtlab_top = R1_tmp;
4398     const Register Rtlab_end = R2_tmp;
4399     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4400 
4401     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4402     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4403     __ add(Rtlab_top, Robj, Rsize);
4404     __ cmp(Rtlab_top, Rtlab_end);
4405     __ b(slow_case, hi);
4406     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4407     if (ZeroTLAB) {
4408       // the fields have been already cleared
4409       __ b(initialize_header);
4410     } else {
4411       // initialize both the header and fields
4412       __ b(initialize_object);
4413     }
4414   } else {
4415     // Allocation in the shared Eden, if allowed.
4416     if (allow_shared_alloc) {
4417       const Register Rheap_top_addr = R2_tmp;
4418       const Register Rheap_top = R5_tmp;
4419       const Register Rheap_end = Rtemp;
4420       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4421 
4422       // Rheap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4423       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4424 
4425       Label retry;
4426       __ bind(retry);
4427 
4428 #ifdef AARCH64
4429       __ ldxr(Robj, Rheap_top_addr);
4430 #else
4431       __ ldr(Robj, Address(Rheap_top_addr));
4432 #endif // AARCH64
4433 
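           // Rheap_end = *end_addr(); it is loaded via its fixed delta from top_addr so that the
           // single Lheap_top_addr literal serves for both the heap top and the heap end.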
4434       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4435       __ add(Rheap_top, Robj, Rsize);
4436       __ cmp(Rheap_top, Rheap_end);
4437       __ b(slow_case, hi);
4438 
4439       // Update heap top atomically.
4440       // If someone beats us on the allocation, try again, otherwise continue.
4441 #ifdef AARCH64
4442       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4443       __ cbnz_w(Rtemp2, retry);
4444 #else
4445       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4446       __ b(retry, ne);
4447 #endif // AARCH64
4448 
4449       __ incr_allocated_bytes(Rsize, Rtemp);
4450     }
4451   }
4452 
4453   if (UseTLAB || allow_shared_alloc) {
4454     const Register Rzero0 = R1_tmp;
4455     const Register Rzero1 = R2_tmp;
4456     const Register Rzero_end = R5_tmp;
4457     const Register Rzero_cur = Rtemp;
4458     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4459 
4460     // The object fields are initialized before the header.  If there are no fields
4461     // to clear (instance size equals the header size), go directly to the header initialization.
4462     __ bind(initialize_object);
4463     __ subs(Rsize, Rsize, sizeof(oopDesc));
4464     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4465     __ b(initialize_header, eq);
4466 
4467 #ifdef ASSERT
4468     // make sure Rsize is a multiple of 8
4469     Label L;
4470     __ tst(Rsize, 0x07);
4471     __ b(L, eq);
4472     __ stop("object size is not multiple of 8 - adjust this code");
4473     __ bind(L);
4474 #endif
4475 
4476 #ifdef AARCH64
4477     {
4478       Label loop;
4479       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4480       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4481       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4482       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4483 
4484       // Zero by 2 words per iteration.
4485       __ bind(loop);
4486       __ subs(Rsize, Rsize, 2*wordSize);
4487       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4488       __ b(loop, gt);
4489     }
4490 #else
4491     __ mov(Rzero0, 0);
4492     __ mov(Rzero1, 0);
4493     __ add(Rzero_end, Rzero_cur, Rsize);
4494 
4495     // initialize remaining object fields: Rsize was a multiple of 8
4496     { Label loop;
4497       // loop is unrolled 2 times
4498       __ bind(loop);
4499       // #1
4500       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4501       __ cmp(Rzero_cur, Rzero_end);
4502       // #2
4503       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4504       __ cmp(Rzero_cur, Rzero_end, ne);
4505       __ b(loop, ne);
4506     }
4507 #endif // AARCH64
4508 
4509     // initialize object header only.
4510     __ bind(initialize_header);
4511     if (UseBiasedLocking) {
4512       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4513     } else {
4514       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4515     }
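         // (With UseBiasedLocking the prototype mark word comes from the klass so that new
         //  instances start out biasable; otherwise the neutral prototype mark is used.)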
4516     // mark
4517     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4518 
4519     // klass
4520 #ifdef AARCH64
4521     __ store_klass_gap(Robj);
4522 #endif // AARCH64
4523     __ store_klass(Rklass, Robj); // blows Rklass:
4524     Rklass = noreg;
4525 
4526     // Note: the DTrace runtime check below is only emitted when DTraceAllocProbes is set, to avoid overhead on each allocation
4527     if (DTraceAllocProbes) {
4528       // Trigger dtrace event for fastpath
4529       Label Lcontinue;
4530 
4531       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4532       __ cbz(Rtemp, Lcontinue);
4533 
4534       __ push(atos);
4535       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4536       __ pop(atos);
4537 
4538       __ bind(Lcontinue);
4539     }
4540 
4541     __ b(done);
4542   } else {
4543     // jump over literals
4544     __ b(slow_case);
4545   }
4546 
4547   if (allow_shared_alloc) {
4548     __ bind_literal(Lheap_top_addr);
4549   }
4550 
4551   // slow case
4552   __ bind(slow_case);
4553   __ get_constant_pool(Rcpool);
4554   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4555   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4556 
4557   // continue
4558   __ bind(done);
4559 
4560   // StoreStore barrier required after complete initialization
4561   // (headers + content zeroing), before the object may escape.
4562   __ membar(MacroAssembler::StoreStore, R1_tmp);
4563 }
4564 
4565 
4566 void TemplateTable::newarray() {
4567   transition(itos, atos);
4568   __ ldrb(R1, at_bcp(1));
4569   __ mov(R2, R0_tos);
4570   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4571   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4572 }
4573 
4574 
4575 void TemplateTable::anewarray() {
4576   transition(itos, atos);
4577   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4578   __ get_constant_pool(R1);
4579   __ mov(R3, R0_tos);
4580   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4581   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4582 }
4583 
4584 
4585 void TemplateTable::arraylength() {
4586   transition(atos, itos);
4587   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4588   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4589 }
4590 
4591 
4592 void TemplateTable::checkcast() {
4593   transition(atos, atos);
4594   Label done, is_null, quicked, resolved, throw_exception;
4595 
4596   const Register Robj = R0_tos;
4597   const Register Rcpool = R2_tmp;
4598   const Register Rtags = R3_tmp;
4599   const Register Rindex = R4_tmp;
4600   const Register Rsuper = R3_tmp;
4601   const Register Rsub   = R4_tmp;
4602   const Register Rsubtype_check_tmp1 = R1_tmp;
4603   const Register Rsubtype_check_tmp2 = LR_tmp;
4604 
4605   __ cbz(Robj, is_null);
4606 
4607   // Get cpool & tags index
4608   __ get_cpool_and_tags(Rcpool, Rtags);
4609   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4610 
4611   // See if bytecode has already been quicked
4612   __ add(Rtemp, Rtags, Rindex);
4613 #ifdef AARCH64
4614   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4615   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4616   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4617 #else
4618   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4619 #endif // AARCH64
4620 
4621   __ cmp(Rtemp, JVM_CONSTANT_Class);
4622 
4623 #ifndef AARCH64
4624   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4625 #endif // !AARCH64
4626 
4627   __ b(quicked, eq);
4628 
4629   __ push(atos);
4630   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4631   // vm_result_2 has metadata result
4632   __ get_vm_result_2(Rsuper, Robj);
4633   __ pop_ptr(Robj);
4634   __ b(resolved);
4635 
4636   __ bind(throw_exception);
4637   // Come here on failure of subtype check
4638   __ profile_typecheck_failed(R1_tmp);
4639   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4640   __ b(Interpreter::_throw_ClassCastException_entry);
4641 
4642   // Get superklass in Rsuper and subklass in Rsub
4643   __ bind(quicked);
4644   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4645 
4646   __ bind(resolved);
4647   __ load_klass(Rsub, Robj);
4648 
4649   // Generate subtype check. Blows both tmps and Rtemp.
4650   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4651   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4652 
4653   // Come here on success
4654 
4655   // Collect counts on whether this check-cast sees NULLs a lot or not.
4656   if (ProfileInterpreter) {
4657     __ b(done);
4658     __ bind(is_null);
4659     __ profile_null_seen(R1_tmp);
4660   } else {
4661     __ bind(is_null);   // same as 'done'
4662   }
4663   __ bind(done);
4664 }
4665 
4666 
4667 void TemplateTable::instanceof() {
4668   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4669   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4670 
4671   transition(atos, itos);
4672   Label done, is_null, not_subtype, quicked, resolved;
4673 
4674   const Register Robj = R0_tos;
4675   const Register Rcpool = R2_tmp;
4676   const Register Rtags = R3_tmp;
4677   const Register Rindex = R4_tmp;
4678   const Register Rsuper = R3_tmp;
4679   const Register Rsub   = R4_tmp;
4680   const Register Rsubtype_check_tmp1 = R0_tmp;
4681   const Register Rsubtype_check_tmp2 = R1_tmp;
4682 
4683   __ cbz(Robj, is_null);
4684 
4685   __ load_klass(Rsub, Robj);
4686 
4687   // Get cpool & tags index
4688   __ get_cpool_and_tags(Rcpool, Rtags);
4689   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4690 
4691   // See if bytecode has already been quicked
4692   __ add(Rtemp, Rtags, Rindex);
4693 #ifdef AARCH64
4694   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4695   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4696   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4697 #else
4698   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4699 #endif // AARCH64
4700   __ cmp(Rtemp, JVM_CONSTANT_Class);
4701 
4702 #ifndef AARCH64
4703   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4704 #endif // !AARCH64
4705 
4706   __ b(quicked, eq);
4707 
4708   __ push(atos);
4709   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4710   // vm_result_2 has metadata result
4711   __ get_vm_result_2(Rsuper, Robj);
4712   __ pop_ptr(Robj);
4713   __ b(resolved);
4714 
4715   // Get superklass in Rsuper and subklass in Rsub
4716   __ bind(quicked);
4717   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4718 
4719   __ bind(resolved);
4720   __ load_klass(Rsub, Robj);
4721 
4722   // Generate subtype check. Blows both tmps and Rtemp.
4723   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4724 
4725   // Come here on success
4726   __ mov(R0_tos, 1);
4727   __ b(done);
4728 
4729   __ bind(not_subtype);
4730   // Come here on failure
4731   __ profile_typecheck_failed(R1_tmp);
4732   __ mov(R0_tos, 0);
4733 
4734   // Collect counts on whether this test sees NULLs a lot or not.
4735   if (ProfileInterpreter) {
4736     __ b(done);
4737     __ bind(is_null);
4738     __ profile_null_seen(R1_tmp);
4739   } else {
4740     __ bind(is_null);   // same as 'done'
4741   }
4742   __ bind(done);
4743 }
4744 
4745 
4746 //----------------------------------------------------------------------------------------------------
4747 // Breakpoints
4748 void TemplateTable::_breakpoint() {
4749 
4750   // Note: We get here even if we are single stepping.
4751   // jbug insists on setting breakpoints at every bytecode
4752   // even if we are in single step mode.
4753 
4754   transition(vtos, vtos);
4755 
  // get the unpatched byte code
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
#ifdef AARCH64
  __ sxtw(Rtmp_save0, R0);
#else
  __ mov(Rtmp_save0, R0);
#endif // AARCH64

  // post the breakpoint event
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);

  // complete the execution of original bytecode
  __ mov(R3_bytecode, Rtmp_save0);
  __ dispatch_only_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
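  // Per the JVM spec, athrow on a null reference raises NullPointerException;
  // the null check below takes care of that before entering the shared
  // throw path.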
  __ mov(Rexception_obj, R0_tos);
  __ null_check(Rexception_obj, Rtemp);
  __ b(Interpreter::throw_exception_entry());
}


//----------------------------------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- Rstack_top        = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved FP     ] <--- FP
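//
// Each monitor entry is a BasicObjectLock (a BasicLock holding the displaced
// header word, followed by the owning oop), so entry_size below is
// frame::interpreter_frame_monitor_size() * wordSize.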


void TemplateTable::monitorenter() {
  transition(atos, vtos);

  const Register Robj = R0_tos;
  const Register Rentry = R1_tmp;

  // check for NULL object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
  Label allocate_monitor, allocated;

  // initialize entry pointer
  __ mov(Rentry, 0);                             // points to free slot or NULL

  // find a free slot in the monitor block (result in Rentry)
  { Label loop, exit;
    const Register Rcur = R2_tmp;
    const Register Rcur_obj = Rtemp;
    const Register Rbottom = R3_tmp;
    assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);                       // check if there are no monitors
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                                 // prefetch monitor's object for the first iteration
#endif // !AARCH64
    __ b(allocate_monitor, eq);                  // there are no monitors, skip searching

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
#endif // AARCH64
    __ cmp(Rcur_obj, 0);                         // check if current entry is used
    __ mov(Rentry, Rcur, eq);                    // if not used then remember entry

    __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
    __ b(exit, eq);                              // if same object then stop searching

    __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry

    __ cmp(Rcur, Rbottom);                       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                                 // prefetch monitor's object for the next iteration
#endif // !AARCH64
    __ b(loop, ne);                              // if not at bottom then check this entry
    __ bind(exit);
  }

  __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one

  __ bind(allocate_monitor);

  // allocate one if there's no free slot
  { Label loop;
    assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);

    // 1. compute new pointers

#ifdef AARCH64
    __ check_extended_sp(Rtemp);
    __ sub(SP, SP, entry_size);                  // adjust extended SP
    __ mov(Rtemp, SP);
    __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

    __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                                 // old monitor block top / expression stack bottom

    __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
    __ check_stack_top_on_expansion();

    __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom

    __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop

    __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                                 // set new monitor block top

    // 2. move expression stack contents
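    //
    // The new monitor slot is carved out between the old expression stack
    // bottom and the moved stack top, so every live expression stack word is
    // copied down by entry_size: R2_tmp walks from the new stack top up to
    // the new expression stack bottom (Rentry), reading each word from its
    // old location at offset entry_size.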

    __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
#ifndef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
#endif // !AARCH64
    __ b(allocated, eq);

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
#endif // AARCH64
    __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
                                                            // and advance to next word
    __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
#ifndef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
#endif // !AARCH64
    __ b(loop, ne);                                         // if not at bottom then copy next word
  }

  // call run-time routine

  // Rentry: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception handling for asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ add(Rbcp, Rbcp, 1);

  __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
  __ lock_object(Rentry);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ arm_stack_overflow_check(0, Rtemp);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);

  const Register Robj = R0_tos;
  const Register Rcur = R1_tmp;
  const Register Rbottom = R2_tmp;
  const Register Rcur_obj = Rtemp;

  // check for NULL object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  Label found, throw_exception;

  // find matching slot
  { Label loop;
    assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);                       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                                 // prefetch monitor's object for the first iteration
#endif // !AARCH64
    __ b(throw_exception, eq);                   // throw exception if there are no monitors

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
#endif // AARCH64
    // check if current entry is for same object
    __ cmp(Rcur_obj, Robj);
    __ b(found, eq);                             // if same object then stop searching
    __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
    __ cmp(Rcur, Rbottom);                       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
#endif // !AARCH64
    __ b(loop, ne);                              // if not at bottom then check this entry
  }

  // error handling. Unlocking was not block-structured
  __ bind(throw_exception);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // Rcur: points to monitor entry
  __ bind(found);
  __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
  __ unlock_object(Rcur);
  __ pop_ptr(Robj);                              // discard object
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldrb(R3_bytecode, at_bcp(1));
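  // R3_bytecode now holds the bytecode following the 'wide' prefix (e.g.
  // iload, istore, ret or iinc); dispatch through the table of wide-variant
  // entry points instead of the normal dispatch table.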

  InlinedAddress Ltable((address)Interpreter::_wentry_point);
  __ ldr_literal(Rtemp, Ltable);
  __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Ltable);
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions

  // The last dimension is on top of the stack; we want the address of the first one:
  //   first_addr = last_addr + ndims * stackElementSize - 1 * wordSize
  // The trailing wordSize adjustment makes the pointer point at the
  // first-dimension slot itself rather than one word past it.
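  // For example (illustrative): for 'new int[a][b]' ndims == 2, the stack
  // holds a below b (b on top), and, with stackElementSize == wordSize as on
  // this port, R1 ends up pointing at the slot holding a.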
  __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  __ sub(R1, Rtemp, wordSize);

  call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  // No MacroAssembler::StoreStore barrier is needed here; the runtime exit path already includes one.
}