1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  // No ARM-specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 #ifndef AARCH64
  62 static inline Address haddress(int n)            { return iaddress(n + 0); }
  63 #endif // !AARCH64
  64 
  65 static inline Address faddress(int n)            { return iaddress(n); }
  66 static inline Address daddress(int n)            { return laddress(n); }
  67 static inline Address aaddress(int n)            { return iaddress(n); }
  68 
  69 
  70 void TemplateTable::get_local_base_addr(Register r, Register index) {
  71   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  72 }
  73 
  74 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  75 #ifdef AARCH64
  76   get_local_base_addr(scratch, index);
  77   return Address(scratch);
  78 #else
  79   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  80 #endif // AARCH64
  81 }
  82 
  83 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  84   return load_iaddress(index, scratch);
  85 }
  86 
  87 Address TemplateTable::load_faddress(Register index, Register scratch) {
  88 #ifdef __SOFTFP__
  89   return load_iaddress(index, scratch);
  90 #else
  91   get_local_base_addr(scratch, index);
  92   return Address(scratch);
  93 #endif // __SOFTFP__
  94 }
  95 
  96 Address TemplateTable::load_daddress(Register index, Register scratch) {
  97   get_local_base_addr(scratch, index);
  98   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  99 }
 100 
 101 // At top of Java expression stack which may be different than SP.
 102 // It isn't for category 1 objects.
 103 static inline Address at_tos() {
 104   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
 105 }
 106 
 107 static inline Address at_tos_p1() {
 108   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 109 }
 110 
 111 static inline Address at_tos_p2() {
 112   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 113 }
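
// These helpers let a template inspect stack operands in place; e.g. aastore()
// below reads the value, index and array reference via at_tos(), at_tos_p1()
// and at_tos_p2() without popping them.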
 114 
 115 
 116 // 32-bit ARM:
 117 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 118 // separate ldr instructions (supports nonadjacent values).
 119 // Used for longs in all modes, and for doubles in SOFTFP mode.
 120 //
 121 // AArch64: loads long local into R0_tos.
 122 //
 123 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 124   const Register Rlocal_base = tmp;
 125   assert_different_registers(Rlocal_index, tmp);
 126 
 127   get_local_base_addr(Rlocal_base, Rlocal_index);
 128 #ifdef AARCH64
 129   __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 130 #else
 131   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 132   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 133 #endif // AARCH64
 134 }
 135 
 136 
 137 // 32-bit ARM:
 138 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 139 // separate str instructions (supports nonadjacent values).
 140 // Used for longs in all modes, and for doubles in SOFTFP mode
 141 //
 142 // AArch64: stores R0_tos to long local.
 143 //
 144 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 145   const Register Rlocal_base = tmp;
 146   assert_different_registers(Rlocal_index, tmp);
 147 
 148   get_local_base_addr(Rlocal_base, Rlocal_index);
 149 #ifdef AARCH64
 150   __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 151 #else
 152   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 153   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 154 #endif // AARCH64
 155 }
 156 
 157 // Returns address of Java array element using temp register as address base.
 158 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 159   int logElemSize = exact_log2(type2aelembytes(elemType));
 160   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 161   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 162 }
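
// Typical use (as in iaload() below):
//   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));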
 163 
 164 //----------------------------------------------------------------------------------------------------
 165 // Condition conversion
 166 AsmCondition convNegCond(TemplateTable::Condition cc) {
 167   switch (cc) {
 168     case TemplateTable::equal        : return ne;
 169     case TemplateTable::not_equal    : return eq;
 170     case TemplateTable::less         : return ge;
 171     case TemplateTable::less_equal   : return gt;
 172     case TemplateTable::greater      : return le;
 173     case TemplateTable::greater_equal: return lt;
 174   }
 175   ShouldNotReachHere();
 176   return nv;
 177 }
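
// Callers typically branch to the not-taken path on the negated condition and
// let the taken path fall through.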
 178 
 179 //----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines
 181 
 182 // Store an oop (or NULL) at the address described by obj.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
// Also destroys new_val and obj.base().
 185 static void do_oop_store(InterpreterMacroAssembler* _masm,
 186                          Address obj,
 187                          Register new_val,
 188                          Register tmp1,
 189                          Register tmp2,
 190                          Register tmp3,
 191                          bool is_null,
 192                          DecoratorSet decorators = 0) {
 193 
 194   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 195   if (is_null) {
 196     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 197   } else {
 198     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 199   }
 200 }
 201 
 202 static void do_oop_load(InterpreterMacroAssembler* _masm,
 203                         Register dst,
 204                         Address obj,
 205                         DecoratorSet decorators = 0) {
 206   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 207 }
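
// Both helpers go through load_heap_oop/store_heap_oop, which dispatch to the
// active GC's BarrierSetAssembler, so any required pre/post barriers (e.g. card
// marking) are emitted according to the given DecoratorSet.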
 208 
 209 Address TemplateTable::at_bcp(int offset) {
 210   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 211   return Address(Rbcp, offset);
 212 }
 213 
 214 
 215 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
 216 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 217                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 218                                    int byte_no) {
 219   assert_different_registers(bc_reg, temp_reg);
 220   if (!RewriteBytecodes)  return;
 221   Label L_patch_done;
 222 
 223   switch (bc) {
 224   case Bytecodes::_fast_aputfield:
 225   case Bytecodes::_fast_bputfield:
 226   case Bytecodes::_fast_zputfield:
 227   case Bytecodes::_fast_cputfield:
 228   case Bytecodes::_fast_dputfield:
 229   case Bytecodes::_fast_fputfield:
 230   case Bytecodes::_fast_iputfield:
 231   case Bytecodes::_fast_lputfield:
 232   case Bytecodes::_fast_sputfield:
 233     {
 234       // We skip bytecode quickening for putfield instructions when
 235       // the put_code written to the constant pool cache is zero.
 236       // This is required so that every execution of this instruction
 237       // calls out to InterpreterRuntime::resolve_get_put to do
 238       // additional, required work.
 239       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 240       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 241       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 242       __ mov(bc_reg, bc);
 243       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 244     }
 245     break;
 246   default:
 247     assert(byte_no == -1, "sanity");
 248     // the pair bytecodes have already done the load.
 249     if (load_bc_into_bc_reg) {
 250       __ mov(bc_reg, bc);
 251     }
 252   }
 253 
 254   if (__ can_post_breakpoint()) {
 255     Label L_fast_patch;
 256     // if a breakpoint is present we can't rewrite the stream directly
 257     __ ldrb(temp_reg, at_bcp(0));
 258     __ cmp(temp_reg, Bytecodes::_breakpoint);
 259     __ b(L_fast_patch, ne);
 260     if (bc_reg != R3) {
 261       __ mov(R3, bc_reg);
 262     }
 263     __ mov(R1, Rmethod);
 264     __ mov(R2, Rbcp);
 265     // Let breakpoint table handling rewrite to quicker bytecode
 266     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 267     __ b(L_patch_done);
 268     __ bind(L_fast_patch);
 269   }
 270 
 271 #ifdef ASSERT
 272   Label L_okay;
 273   __ ldrb(temp_reg, at_bcp(0));
 274   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 275   __ b(L_okay, eq);
 276   __ cmp(temp_reg, bc_reg);
 277   __ b(L_okay, eq);
 278   __ stop("patching the wrong bytecode");
 279   __ bind(L_okay);
 280 #endif
 281 
 282   // patch bytecode
 283   __ strb(bc_reg, at_bcp(0));
 284   __ bind(L_patch_done);
 285 }
 286 
 287 //----------------------------------------------------------------------------------------------------
 288 // Individual instructions
 289 
 290 void TemplateTable::nop() {
 291   transition(vtos, vtos);
 292   // nothing to do
 293 }
 294 
 295 void TemplateTable::shouldnotreachhere() {
 296   transition(vtos, vtos);
 297   __ stop("shouldnotreachhere bytecode");
 298 }
 299 
 300 
 301 
 302 void TemplateTable::aconst_null() {
 303   transition(vtos, atos);
 304   __ mov(R0_tos, 0);
 305 }
 306 
 307 
 308 void TemplateTable::iconst(int value) {
 309   transition(vtos, itos);
 310   __ mov_slow(R0_tos, value);
 311 }
 312 
 313 
 314 void TemplateTable::lconst(int value) {
 315   transition(vtos, ltos);
 316   assert((value == 0) || (value == 1), "unexpected long constant");
 317   __ mov(R0_tos, value);
 318 #ifndef AARCH64
 319   __ mov(R1_tos_hi, 0);
 320 #endif // !AARCH64
 321 }
 322 
 323 
 324 void TemplateTable::fconst(int value) {
 325   transition(vtos, ftos);
 326 #ifdef AARCH64
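  // 0.0f cannot be encoded as an fmov immediate, hence the move from ZR;
  // 0x70 and 0x00 are the 8-bit fmov immediate encodings of 1.0f and 2.0f.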
 327   switch(value) {
 328   case 0:   __ fmov_sw(S0_tos, ZR);    break;
 329   case 1:   __ fmov_s (S0_tos, 0x70);  break;
 330   case 2:   __ fmov_s (S0_tos, 0x00);  break;
 331   default:  ShouldNotReachHere();      break;
 332   }
 333 #else
 334   const int zero = 0;         // 0.0f
 335   const int one = 0x3f800000; // 1.0f
 336   const int two = 0x40000000; // 2.0f
 337 
 338   switch(value) {
 339   case 0:   __ mov(R0_tos, zero);   break;
 340   case 1:   __ mov(R0_tos, one);    break;
 341   case 2:   __ mov(R0_tos, two);    break;
 342   default:  ShouldNotReachHere();   break;
 343   }
 344 
 345 #ifndef __SOFTFP__
 346   __ fmsr(S0_tos, R0_tos);
 347 #endif // !__SOFTFP__
 348 #endif // AARCH64
 349 }
 350 
 351 
 352 void TemplateTable::dconst(int value) {
 353   transition(vtos, dtos);
 354 #ifdef AARCH64
 355   switch(value) {
 356   case 0:   __ fmov_dx(D0_tos, ZR);    break;
 357   case 1:   __ fmov_d (D0_tos, 0x70);  break;
 358   default:  ShouldNotReachHere();      break;
 359   }
 360 #else
 361   const int one_lo = 0;            // low part of 1.0
 362   const int one_hi = 0x3ff00000;   // high part of 1.0
 363 
 364   if (value == 0) {
 365 #ifdef __SOFTFP__
 366     __ mov(R0_tos_lo, 0);
 367     __ mov(R1_tos_hi, 0);
 368 #else
 369     __ mov(R0_tmp, 0);
 370     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 371 #endif // __SOFTFP__
 372   } else if (value == 1) {
 373     __ mov(R0_tos_lo, one_lo);
 374     __ mov_slow(R1_tos_hi, one_hi);
 375 #ifndef __SOFTFP__
 376     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 377 #endif // !__SOFTFP__
 378   } else {
 379     ShouldNotReachHere();
 380   }
 381 #endif // AARCH64
 382 }
 383 
 384 
 385 void TemplateTable::bipush() {
 386   transition(vtos, itos);
 387   __ ldrsb(R0_tos, at_bcp(1));
 388 }
 389 
 390 
 391 void TemplateTable::sipush() {
 392   transition(vtos, itos);
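  // assemble the signed 16-bit operand from its two big-endian bytes in the bytecode stream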
 393   __ ldrsb(R0_tmp, at_bcp(1));
 394   __ ldrb(R1_tmp, at_bcp(2));
 395   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 396 }
 397 
 398 
 399 void TemplateTable::ldc(bool wide) {
 400   transition(vtos, vtos);
 401   Label fastCase, Condy, Done;
 402 
 403   const Register Rindex = R1_tmp;
 404   const Register Rcpool = R2_tmp;
 405   const Register Rtags  = R3_tmp;
 406   const Register RtagType = R3_tmp;
 407 
 408   if (wide) {
 409     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 410   } else {
 411     __ ldrb(Rindex, at_bcp(1));
 412   }
 413   __ get_cpool_and_tags(Rcpool, Rtags);
 414 
 415   const int base_offset = ConstantPool::header_size() * wordSize;
 416   const int tags_offset = Array<u1>::base_offset_in_bytes();
 417 
 418   // get const type
 419   __ add(Rtemp, Rtags, tags_offset);
 420 #ifdef AARCH64
 421   __ add(Rtemp, Rtemp, Rindex);
 422   __ ldarb(RtagType, Rtemp);  // TODO-AARCH64 figure out if barrier is needed here, or control dependency is enough
 423 #else
 424   __ ldrb(RtagType, Address(Rtemp, Rindex));
 425   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 426 #endif // AARCH64
 427 
 428   // unresolved class - get the resolved class
 429   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 430 
 431   // unresolved class in error (resolution failed) - call into runtime
 432   // so that the same error from first resolution attempt is thrown.
 433 #ifdef AARCH64
 434   __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
 435   __ cond_cmp(RtagType, Rtemp, ne);
 436 #else
 437   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 438 #endif // AARCH64
 439 
 440   // resolved class - need to call vm to get java mirror of the class
 441   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 442 
 443   __ b(fastCase, ne);
 444 
 445   // slow case - call runtime
 446   __ mov(R1, wide);
 447   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 448   __ push(atos);
 449   __ b(Done);
 450 
 451   // int, float, String
 452   __ bind(fastCase);
 453 
 454   __ cmp(RtagType, JVM_CONSTANT_Integer);
 455   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 456   __ b(Condy, ne);
 457 
 458   // itos, ftos
 459   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 460   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 461 
  // floats and ints are placed on the stack in the same way, so
  // we can use push(itos) to transfer a float value without VFP
 464   __ push(itos);
 465   __ b(Done);
 466 
 467   __ bind(Condy);
 468   condy_helper(Done);
 469 
 470   __ bind(Done);
 471 }
 472 
 473 // Fast path for caching oop constants.
 474 void TemplateTable::fast_aldc(bool wide) {
 475   transition(vtos, atos);
 476   int index_size = wide ? sizeof(u2) : sizeof(u1);
 477   Label resolved;
 478 
 479   // We are resolved if the resolved reference cache entry contains a
 480   // non-null object (CallSite, etc.)
 481   assert_different_registers(R0_tos, R2_tmp);
 482   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 483   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 484   __ cbnz(R0_tos, resolved);
 485 
 486   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 487 
 488   // first time invocation - must resolve first
 489   __ mov(R1, (int)bytecode());
 490   __ call_VM(R0_tos, entry, R1);
 491   __ bind(resolved);
 492 
 493   { // Check for the null sentinel.
 494     // If we just called the VM, that already did the mapping for us,
 495     // but it's harmless to retry.
 496     Label notNull;
 497     Register result = R0;
 498     Register tmp = R1;
 499     Register rarg = R2;
 500 
 501     // Stash null_sentinel address to get its value later
 502     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 503     __ ldr(tmp, Address(rarg));
 504     __ cmp(result, tmp);
 505     __ b(notNull, ne);
 506     __ mov(result, 0);  // NULL object reference
 507     __ bind(notNull);
 508   }
 509 
 510   if (VerifyOops) {
 511     __ verify_oop(R0_tos);
 512   }
 513 }
 514 
 515 void TemplateTable::ldc2_w() {
 516   transition(vtos, vtos);
 517   const Register Rtags  = R2_tmp;
 518   const Register Rindex = R3_tmp;
 519   const Register Rcpool = R4_tmp;
 520   const Register Rbase  = R5_tmp;
 521 
 522   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 523 
 524   __ get_cpool_and_tags(Rcpool, Rtags);
 525   const int base_offset = ConstantPool::header_size() * wordSize;
 526   const int tags_offset = Array<u1>::base_offset_in_bytes();
 527 
 528   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 529 
  // get type from tags (the tag is needed on both the hard-float and SOFTFP paths)
  __ add(Rtemp, Rtags, tags_offset);
  __ ldrb(Rtemp, Address(Rtemp, Rindex));

  Label Condy, exit;
#ifdef __ABI_HARD__
  Label Long;
 536   __ cmp(Rtemp, JVM_CONSTANT_Double);
 537   __ b(Long, ne);
 538   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 539 
 540   __ push(dtos);
 541   __ b(exit);
 542   __ bind(Long);
 543 #endif
 544 
 545   __ cmp(Rtemp, JVM_CONSTANT_Long);
 546   __ b(Condy, ne);
 547 #ifdef AARCH64
 548   __ ldr(R0_tos, Address(Rbase, base_offset));
 549 #else
 550   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 551   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 552 #endif // AARCH64
 553   __ push(ltos);
 554   __ b(exit);
 555 
 556   __ bind(Condy);
 557   condy_helper(exit);
 558 
 559   __ bind(exit);
 560 }
 561 
 562 
 563 void TemplateTable::condy_helper(Label& Done)
 564 {
 565   Register obj   = R0_tmp;
 566   Register rtmp  = R1_tmp;
 567   Register flags = R2_tmp;
 568   Register off   = R3_tmp;
 569 
 570   __ mov(rtmp, (int) bytecode());
 571   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
 572   __ get_vm_result_2(flags, rtmp);
 573 
 574   // VMr = obj = base address to find primitive value to push
 575   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 576   __ mov(off, flags);
 577 
 578 #ifdef AARCH64
 579   __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
 580 #else
 581   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 582   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 583 #endif
 584 
 585   const Address field(obj, off);
 586 
 587   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
 588   // Make sure we don't need to mask flags after the above shift
 589   ConstantPoolCacheEntry::verify_tos_state_shift();
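  // flags now holds just the tos state (switched on below); off is the byte
  // offset of the constant value within the object returned in obj.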
 590 
 591   switch (bytecode()) {
 592     case Bytecodes::_ldc:
 593     case Bytecodes::_ldc_w:
 594       {
 595         // tos in (itos, ftos, stos, btos, ctos, ztos)
 596         Label notIntFloat, notShort, notByte, notChar, notBool;
 597         __ cmp(flags, itos);
 598         __ cond_cmp(flags, ftos, ne);
 599         __ b(notIntFloat, ne);
 600         __ ldr(R0_tos, field);
 601         __ push(itos);
 602         __ b(Done);
 603 
 604         __ bind(notIntFloat);
 605         __ cmp(flags, stos);
 606         __ b(notShort, ne);
 607         __ ldrsh(R0_tos, field);
 608         __ push(stos);
 609         __ b(Done);
 610 
 611         __ bind(notShort);
 612         __ cmp(flags, btos);
 613         __ b(notByte, ne);
 614         __ ldrsb(R0_tos, field);
 615         __ push(btos);
 616         __ b(Done);
 617 
 618         __ bind(notByte);
 619         __ cmp(flags, ctos);
 620         __ b(notChar, ne);
 621         __ ldrh(R0_tos, field);
 622         __ push(ctos);
 623         __ b(Done);
 624 
 625         __ bind(notChar);
 626         __ cmp(flags, ztos);
 627         __ b(notBool, ne);
 628         __ ldrsb(R0_tos, field);
 629         __ push(ztos);
 630         __ b(Done);
 631 
 632         __ bind(notBool);
 633         break;
 634       }
 635 
 636     case Bytecodes::_ldc2_w:
 637       {
 638         Label notLongDouble;
 639         __ cmp(flags, ltos);
 640         __ cond_cmp(flags, dtos, ne);
 641         __ b(notLongDouble, ne);
 642 
 643 #ifdef AARCH64
 644         __ ldr(R0_tos, field);
 645 #else
 646         __ add(rtmp, obj, wordSize);
 647         __ ldr(R0_tos_lo, Address(obj, off));
 648         __ ldr(R1_tos_hi, Address(rtmp, off));
 649 #endif
 650         __ push(ltos);
 651         __ b(Done);
 652 
 653         __ bind(notLongDouble);
 654 
 655         break;
 656       }
 657 
 658     default:
 659       ShouldNotReachHere();
 660     }
 661 
 662     __ stop("bad ldc/condy");
 663 }
 664 
 665 
 666 void TemplateTable::locals_index(Register reg, int offset) {
 667   __ ldrb(reg, at_bcp(offset));
 668 }
 669 
 670 void TemplateTable::iload() {
 671   iload_internal();
 672 }
 673 
 674 void TemplateTable::nofast_iload() {
 675   iload_internal(may_not_rewrite);
 676 }
 677 
 678 void TemplateTable::iload_internal(RewriteControl rc) {
 679   transition(vtos, itos);
 680 
 681   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 682     Label rewrite, done;
 683     const Register next_bytecode = R1_tmp;
 684     const Register target_bytecode = R2_tmp;
 685 
 686     // get next byte
 687     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 688     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 689     // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
 691     // an iload pair.
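    // Illustration, for a run "iload; iload; iload": the last one is rewritten
    // to _fast_iload first; on a later execution the middle one, seeing
    // _fast_iload ahead, becomes _fast_iload2; the first one, seeing
    // _fast_iload2 ahead, is rewritten to a plain _fast_iload.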
 692     __ cmp(next_bytecode, Bytecodes::_iload);
 693     __ b(done, eq);
 694 
 695     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 696     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 697     __ b(rewrite, eq);
 698 
 699     // if _caload, rewrite to fast_icaload
 700     __ cmp(next_bytecode, Bytecodes::_caload);
 701     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 702     __ b(rewrite, eq);
 703 
 704     // rewrite so iload doesn't check again.
 705     __ mov(target_bytecode, Bytecodes::_fast_iload);
 706 
 707     // rewrite
 708     // R2: fast bytecode
 709     __ bind(rewrite);
 710     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 711     __ bind(done);
 712   }
 713 
 714   // Get the local value into tos
 715   const Register Rlocal_index = R1_tmp;
 716   locals_index(Rlocal_index);
 717   Address local = load_iaddress(Rlocal_index, Rtemp);
 718   __ ldr_s32(R0_tos, local);
 719 }
 720 
 721 
 722 void TemplateTable::fast_iload2() {
 723   transition(vtos, itos);
 724   const Register Rlocal_index = R1_tmp;
 725 
 726   locals_index(Rlocal_index);
 727   Address local = load_iaddress(Rlocal_index, Rtemp);
 728   __ ldr_s32(R0_tos, local);
 729   __ push(itos);
 730 
 731   locals_index(Rlocal_index, 3);
 732   local = load_iaddress(Rlocal_index, Rtemp);
 733   __ ldr_s32(R0_tos, local);
 734 }
 735 
 736 void TemplateTable::fast_iload() {
 737   transition(vtos, itos);
 738   const Register Rlocal_index = R1_tmp;
 739 
 740   locals_index(Rlocal_index);
 741   Address local = load_iaddress(Rlocal_index, Rtemp);
 742   __ ldr_s32(R0_tos, local);
 743 }
 744 
 745 
 746 void TemplateTable::lload() {
 747   transition(vtos, ltos);
 748   const Register Rlocal_index = R2_tmp;
 749 
 750   locals_index(Rlocal_index);
 751   load_category2_local(Rlocal_index, R3_tmp);
 752 }
 753 
 754 
 755 void TemplateTable::fload() {
 756   transition(vtos, ftos);
 757   const Register Rlocal_index = R2_tmp;
 758 
 759   // Get the local value into tos
 760   locals_index(Rlocal_index);
 761   Address local = load_faddress(Rlocal_index, Rtemp);
 762 #ifdef __SOFTFP__
 763   __ ldr(R0_tos, local);
 764 #else
 765   __ ldr_float(S0_tos, local);
 766 #endif // __SOFTFP__
 767 }
 768 
 769 
 770 void TemplateTable::dload() {
 771   transition(vtos, dtos);
 772   const Register Rlocal_index = R2_tmp;
 773 
 774   locals_index(Rlocal_index);
 775 
 776 #ifdef __SOFTFP__
 777   load_category2_local(Rlocal_index, R3_tmp);
 778 #else
 779   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 780 #endif // __SOFTFP__
 781 }
 782 
 783 
 784 void TemplateTable::aload() {
 785   transition(vtos, atos);
 786   const Register Rlocal_index = R1_tmp;
 787 
 788   locals_index(Rlocal_index);
 789   Address local = load_aaddress(Rlocal_index, Rtemp);
 790   __ ldr(R0_tos, local);
 791 }
 792 
 793 
 794 void TemplateTable::locals_index_wide(Register reg) {
 795   assert_different_registers(reg, Rtemp);
 796   __ ldrb(Rtemp, at_bcp(2));
 797   __ ldrb(reg, at_bcp(3));
 798   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 799 }
 800 
 801 
 802 void TemplateTable::wide_iload() {
 803   transition(vtos, itos);
 804   const Register Rlocal_index = R2_tmp;
 805 
 806   locals_index_wide(Rlocal_index);
 807   Address local = load_iaddress(Rlocal_index, Rtemp);
 808   __ ldr_s32(R0_tos, local);
 809 }
 810 
 811 
 812 void TemplateTable::wide_lload() {
 813   transition(vtos, ltos);
 814   const Register Rlocal_index = R2_tmp;
 815   const Register Rlocal_base = R3_tmp;
 816 
 817   locals_index_wide(Rlocal_index);
 818   load_category2_local(Rlocal_index, R3_tmp);
 819 }
 820 
 821 
 822 void TemplateTable::wide_fload() {
 823   transition(vtos, ftos);
 824   const Register Rlocal_index = R2_tmp;
 825 
 826   locals_index_wide(Rlocal_index);
 827   Address local = load_faddress(Rlocal_index, Rtemp);
 828 #ifdef __SOFTFP__
 829   __ ldr(R0_tos, local);
 830 #else
 831   __ ldr_float(S0_tos, local);
 832 #endif // __SOFTFP__
 833 }
 834 
 835 
 836 void TemplateTable::wide_dload() {
 837   transition(vtos, dtos);
 838   const Register Rlocal_index = R2_tmp;
 839 
 840   locals_index_wide(Rlocal_index);
 841 #ifdef __SOFTFP__
 842   load_category2_local(Rlocal_index, R3_tmp);
 843 #else
 844   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 845 #endif // __SOFTFP__
 846 }
 847 
 848 
 849 void TemplateTable::wide_aload() {
 850   transition(vtos, atos);
 851   const Register Rlocal_index = R2_tmp;
 852 
 853   locals_index_wide(Rlocal_index);
 854   Address local = load_aaddress(Rlocal_index, Rtemp);
 855   __ ldr(R0_tos, local);
 856 }
 857 
 858 void TemplateTable::index_check(Register array, Register index) {
 859   // Pop ptr into array
 860   __ pop_ptr(array);
 861   index_check_without_pop(array, index);
 862 }
 863 
 864 void TemplateTable::index_check_without_pop(Register array, Register index) {
 865   assert_different_registers(array, index, Rtemp);
 866   // check array
 867   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 868   // check index
 869   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 870   __ cmp_32(index, Rtemp);
 871   if (index != R4_ArrayIndexOutOfBounds_index) {
 872     // convention with generate_ArrayIndexOutOfBounds_handler()
 873     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 874   }
 875   __ mov(R1, array, hs);
 876   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 877 }
 878 
 879 
 880 void TemplateTable::iaload() {
 881   transition(itos, itos);
 882   const Register Rarray = R1_tmp;
 883   const Register Rindex = R0_tos;
 884 
 885   index_check(Rarray, Rindex);
 886   __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
 887 }
 888 
 889 
 890 void TemplateTable::laload() {
 891   transition(itos, ltos);
 892   const Register Rarray = R1_tmp;
 893   const Register Rindex = R0_tos;
 894 
 895   index_check(Rarray, Rindex);
 896 
 897 #ifdef AARCH64
 898   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 899 #else
 900   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 901   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
 902   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 903 #endif // AARCH64
 904 }
 905 
 906 
 907 void TemplateTable::faload() {
 908   transition(itos, ftos);
 909   const Register Rarray = R1_tmp;
 910   const Register Rindex = R0_tos;
 911 
 912   index_check(Rarray, Rindex);
 913 
 914   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
 915 #ifdef __SOFTFP__
 916   __ ldr(R0_tos, addr);
 917 #else
 918   __ ldr_float(S0_tos, addr);
 919 #endif // __SOFTFP__
 920 }
 921 
 922 
 923 void TemplateTable::daload() {
 924   transition(itos, dtos);
 925   const Register Rarray = R1_tmp;
 926   const Register Rindex = R0_tos;
 927 
 928   index_check(Rarray, Rindex);
 929 
 930 #ifdef __SOFTFP__
 931   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
 932   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
 933   __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
 934 #else
 935   __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
 936 #endif // __SOFTFP__
 937 }
 938 
 939 
 940 void TemplateTable::aaload() {
 941   transition(itos, atos);
 942   const Register Rarray = R1_tmp;
 943   const Register Rindex = R0_tos;
 944 
 945   index_check(Rarray, Rindex);
 946   do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
 947 }
 948 
 949 
 950 void TemplateTable::baload() {
 951   transition(itos, itos);
 952   const Register Rarray = R1_tmp;
 953   const Register Rindex = R0_tos;
 954 
 955   index_check(Rarray, Rindex);
 956   __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
 957 }
 958 
 959 
 960 void TemplateTable::caload() {
 961   transition(itos, itos);
 962   const Register Rarray = R1_tmp;
 963   const Register Rindex = R0_tos;
 964 
 965   index_check(Rarray, Rindex);
 966   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 967 }
 968 
 969 
 970 // iload followed by caload frequent pair
 971 void TemplateTable::fast_icaload() {
 972   transition(vtos, itos);
 973   const Register Rlocal_index = R1_tmp;
 974   const Register Rarray = R1_tmp;
 975   const Register Rindex = R4_tmp; // index_check prefers index on R4
 976   assert_different_registers(Rlocal_index, Rindex);
 977   assert_different_registers(Rarray, Rindex);
 978 
 979   // load index out of locals
 980   locals_index(Rlocal_index);
 981   Address local = load_iaddress(Rlocal_index, Rtemp);
 982   __ ldr_s32(Rindex, local);
 983 
 984   // get array element
 985   index_check(Rarray, Rindex);
 986   __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
 987 }
 988 
 989 
 990 void TemplateTable::saload() {
 991   transition(itos, itos);
 992   const Register Rarray = R1_tmp;
 993   const Register Rindex = R0_tos;
 994 
 995   index_check(Rarray, Rindex);
 996   __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
 997 }
 998 
 999 
1000 void TemplateTable::iload(int n) {
1001   transition(vtos, itos);
1002   __ ldr_s32(R0_tos, iaddress(n));
1003 }
1004 
1005 
1006 void TemplateTable::lload(int n) {
1007   transition(vtos, ltos);
1008 #ifdef AARCH64
1009   __ ldr(R0_tos, laddress(n));
1010 #else
1011   __ ldr(R0_tos_lo, laddress(n));
1012   __ ldr(R1_tos_hi, haddress(n));
1013 #endif // AARCH64
1014 }
1015 
1016 
1017 void TemplateTable::fload(int n) {
1018   transition(vtos, ftos);
1019 #ifdef __SOFTFP__
1020   __ ldr(R0_tos, faddress(n));
1021 #else
1022   __ ldr_float(S0_tos, faddress(n));
1023 #endif // __SOFTFP__
1024 }
1025 
1026 
1027 void TemplateTable::dload(int n) {
1028   transition(vtos, dtos);
1029 #ifdef __SOFTFP__
1030   __ ldr(R0_tos_lo, laddress(n));
1031   __ ldr(R1_tos_hi, haddress(n));
1032 #else
1033   __ ldr_double(D0_tos, daddress(n));
1034 #endif // __SOFTFP__
1035 }
1036 
1037 
1038 void TemplateTable::aload(int n) {
1039   transition(vtos, atos);
1040   __ ldr(R0_tos, aaddress(n));
1041 }
1042 
1043 void TemplateTable::aload_0() {
1044   aload_0_internal();
1045 }
1046 
1047 void TemplateTable::nofast_aload_0() {
1048   aload_0_internal(may_not_rewrite);
1049 }
1050 
1051 void TemplateTable::aload_0_internal(RewriteControl rc) {
1052   transition(vtos, atos);
1053   // According to bytecode histograms, the pairs:
1054   //
1055   // _aload_0, _fast_igetfield
1056   // _aload_0, _fast_agetfield
1057   // _aload_0, _fast_fgetfield
1058   //
1059   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
1060   // bytecode checks if the next bytecode is either _fast_igetfield,
1061   // _fast_agetfield or _fast_fgetfield and then rewrites the
1062   // current bytecode into a pair bytecode; otherwise it rewrites the current
1063   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
1064   //
1065   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
1066   //       otherwise we may miss an opportunity for a pair.
1067   //
1068   // Also rewrite frequent pairs
1069   //   aload_0, aload_1
1070   //   aload_0, iload_1
1071   // These bytecodes with a small amount of code are most profitable to rewrite
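  //
  // Illustration (assuming the following getfield has already been quickened):
  //   aload_0, _fast_igetfield  ->  _fast_iaccess_0   (likewise for a/f getfield)
  //   aload_0, getfield         ->  left alone until getfield itself is quickened
  //   aload_0, anything else    ->  _fast_aload_0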
1072   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1073     Label rewrite, done;
1074     const Register next_bytecode = R1_tmp;
1075     const Register target_bytecode = R2_tmp;
1076 
1077     // get next byte
1078     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1079 
1080     // if _getfield then wait with rewrite
1081     __ cmp(next_bytecode, Bytecodes::_getfield);
1082     __ b(done, eq);
1083 
1084     // if _igetfield then rewrite to _fast_iaccess_0
1085     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1086     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1087     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1088     __ b(rewrite, eq);
1089 
1090     // if _agetfield then rewrite to _fast_aaccess_0
1091     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1092     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1093     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1094     __ b(rewrite, eq);
1095 
1096     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
1097     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1098     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1099 
1100     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1101 #ifdef AARCH64
1102     __ mov(Rtemp, Bytecodes::_fast_faccess_0);
1103     __ mov(target_bytecode, Bytecodes::_fast_aload_0);
1104     __ mov(target_bytecode, Rtemp, eq);
1105 #else
1106     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1107     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1108 #endif // AARCH64
1109 
1110     // rewrite
1111     __ bind(rewrite);
1112     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1113 
1114     __ bind(done);
1115   }
1116 
1117   aload(0);
1118 }
1119 
1120 void TemplateTable::istore() {
1121   transition(itos, vtos);
1122   const Register Rlocal_index = R2_tmp;
1123 
1124   locals_index(Rlocal_index);
1125   Address local = load_iaddress(Rlocal_index, Rtemp);
1126   __ str_32(R0_tos, local);
1127 }
1128 
1129 
1130 void TemplateTable::lstore() {
1131   transition(ltos, vtos);
1132   const Register Rlocal_index = R2_tmp;
1133 
1134   locals_index(Rlocal_index);
1135   store_category2_local(Rlocal_index, R3_tmp);
1136 }
1137 
1138 
1139 void TemplateTable::fstore() {
1140   transition(ftos, vtos);
1141   const Register Rlocal_index = R2_tmp;
1142 
1143   locals_index(Rlocal_index);
1144   Address local = load_faddress(Rlocal_index, Rtemp);
1145 #ifdef __SOFTFP__
1146   __ str(R0_tos, local);
1147 #else
1148   __ str_float(S0_tos, local);
1149 #endif // __SOFTFP__
1150 }
1151 
1152 
1153 void TemplateTable::dstore() {
1154   transition(dtos, vtos);
1155   const Register Rlocal_index = R2_tmp;
1156 
1157   locals_index(Rlocal_index);
1158 
1159 #ifdef __SOFTFP__
1160   store_category2_local(Rlocal_index, R3_tmp);
1161 #else
1162   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1163 #endif // __SOFTFP__
1164 }
1165 
1166 
1167 void TemplateTable::astore() {
1168   transition(vtos, vtos);
1169   const Register Rlocal_index = R1_tmp;
1170 
1171   __ pop_ptr(R0_tos);
1172   locals_index(Rlocal_index);
1173   Address local = load_aaddress(Rlocal_index, Rtemp);
1174   __ str(R0_tos, local);
1175 }
1176 
1177 
1178 void TemplateTable::wide_istore() {
1179   transition(vtos, vtos);
1180   const Register Rlocal_index = R2_tmp;
1181 
1182   __ pop_i(R0_tos);
1183   locals_index_wide(Rlocal_index);
1184   Address local = load_iaddress(Rlocal_index, Rtemp);
1185   __ str_32(R0_tos, local);
1186 }
1187 
1188 
1189 void TemplateTable::wide_lstore() {
1190   transition(vtos, vtos);
1191   const Register Rlocal_index = R2_tmp;
1192   const Register Rlocal_base = R3_tmp;
1193 
1194 #ifdef AARCH64
1195   __ pop_l(R0_tos);
1196 #else
1197   __ pop_l(R0_tos_lo, R1_tos_hi);
1198 #endif // AARCH64
1199 
1200   locals_index_wide(Rlocal_index);
1201   store_category2_local(Rlocal_index, R3_tmp);
1202 }
1203 
1204 
1205 void TemplateTable::wide_fstore() {
1206   wide_istore();
1207 }
1208 
1209 
1210 void TemplateTable::wide_dstore() {
1211   wide_lstore();
1212 }
1213 
1214 
1215 void TemplateTable::wide_astore() {
1216   transition(vtos, vtos);
1217   const Register Rlocal_index = R2_tmp;
1218 
1219   __ pop_ptr(R0_tos);
1220   locals_index_wide(Rlocal_index);
1221   Address local = load_aaddress(Rlocal_index, Rtemp);
1222   __ str(R0_tos, local);
1223 }
1224 
1225 
1226 void TemplateTable::iastore() {
1227   transition(itos, vtos);
1228   const Register Rindex = R4_tmp; // index_check prefers index in R4
1229   const Register Rarray = R3_tmp;
1230   // R0_tos: value
1231 
1232   __ pop_i(Rindex);
1233   index_check(Rarray, Rindex);
1234   __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
1235 }
1236 
1237 
1238 void TemplateTable::lastore() {
1239   transition(ltos, vtos);
1240   const Register Rindex = R4_tmp; // index_check prefers index in R4
1241   const Register Rarray = R3_tmp;
1242   // R0_tos_lo:R1_tos_hi: value
1243 
1244   __ pop_i(Rindex);
1245   index_check(Rarray, Rindex);
1246 
1247 #ifdef AARCH64
1248   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
1249 #else
1250   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1251   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
1252   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1253 #endif // AARCH64
1254 }
1255 
1256 
1257 void TemplateTable::fastore() {
1258   transition(ftos, vtos);
1259   const Register Rindex = R4_tmp; // index_check prefers index in R4
1260   const Register Rarray = R3_tmp;
1261   // S0_tos/R0_tos: value
1262 
1263   __ pop_i(Rindex);
1264   index_check(Rarray, Rindex);
1265   Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
1266 
1267 #ifdef __SOFTFP__
1268   __ str(R0_tos, addr);
1269 #else
1270   __ str_float(S0_tos, addr);
1271 #endif // __SOFTFP__
1272 }
1273 
1274 
1275 void TemplateTable::dastore() {
1276   transition(dtos, vtos);
1277   const Register Rindex = R4_tmp; // index_check prefers index in R4
1278   const Register Rarray = R3_tmp;
  // D0_tos / R0_tos_lo:R1_tos_hi: value
1280 
1281   __ pop_i(Rindex);
1282   index_check(Rarray, Rindex);
1283 
1284 #ifdef __SOFTFP__
1285   __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
1286   __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
1287   __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
1288 #else
1289   __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
1290 #endif // __SOFTFP__
1291 }
1292 
1293 
1294 void TemplateTable::aastore() {
1295   transition(vtos, vtos);
1296   Label is_null, throw_array_store, done;
1297 
1298   const Register Raddr_1   = R1_tmp;
1299   const Register Rvalue_2  = R2_tmp;
1300   const Register Rarray_3  = R3_tmp;
1301   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1302   const Register Rsub_5    = R5_tmp;
1303   const Register Rsuper_LR = LR_tmp;
1304 
1305   // stack: ..., array, index, value
1306   __ ldr(Rvalue_2, at_tos());     // Value
1307   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1308   __ ldr(Rarray_3, at_tos_p2());  // Array
1309 
1310   index_check_without_pop(Rarray_3, Rindex_4);
1311 
1312   // Compute the array base
1313   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1314 
1315   // do array store check - check for NULL value first
1316   __ cbz(Rvalue_2, is_null);
1317 
1318   // Load subklass
1319   __ load_klass(Rsub_5, Rvalue_2);
1320   // Load superklass
1321   __ load_klass(Rtemp, Rarray_3);
1322   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1323 
1324   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1325   // Come here on success
1326 
1327   // Store value
1328   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1329 
1330   // Now store using the appropriate barrier
1331   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
1332   __ b(done);
1333 
1334   __ bind(throw_array_store);
1335 
1336   // Come here on failure of subtype check
1337   __ profile_typecheck_failed(R0_tmp);
1338 
1339   // object is at TOS
1340   __ b(Interpreter::_throw_ArrayStoreException_entry);
1341 
1342   // Have a NULL in Rvalue_2, store NULL at array[index].
1343   __ bind(is_null);
1344   __ profile_null_seen(R0_tmp);
1345 
1346   // Store a NULL
1347   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
1348 
1349   // Pop stack arguments
1350   __ bind(done);
1351   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1352 }
1353 
1354 
1355 void TemplateTable::bastore() {
1356   transition(itos, vtos);
1357   const Register Rindex = R4_tmp; // index_check prefers index in R4
1358   const Register Rarray = R3_tmp;
1359   // R0_tos: value
1360 
1361   __ pop_i(Rindex);
1362   index_check(Rarray, Rindex);
1363 
1364   // Need to check whether array is boolean or byte
1365   // since both types share the bastore bytecode.
1366   __ load_klass(Rtemp, Rarray);
1367   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1368   Label L_skip;
1369   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1370   __ b(L_skip, eq);
1371   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1372   __ bind(L_skip);
1373   __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
1374 }
1375 
1376 
1377 void TemplateTable::castore() {
1378   transition(itos, vtos);
1379   const Register Rindex = R4_tmp; // index_check prefers index in R4
1380   const Register Rarray = R3_tmp;
1381   // R0_tos: value
1382 
1383   __ pop_i(Rindex);
1384   index_check(Rarray, Rindex);
1385 
1386   __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
1387 }
1388 
1389 
1390 void TemplateTable::sastore() {
1391   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1392            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1393          "base offsets for char and short should be equal");
1394   castore();
1395 }
1396 
1397 
1398 void TemplateTable::istore(int n) {
1399   transition(itos, vtos);
1400   __ str_32(R0_tos, iaddress(n));
1401 }
1402 
1403 
1404 void TemplateTable::lstore(int n) {
1405   transition(ltos, vtos);
1406 #ifdef AARCH64
1407   __ str(R0_tos, laddress(n));
1408 #else
1409   __ str(R0_tos_lo, laddress(n));
1410   __ str(R1_tos_hi, haddress(n));
1411 #endif // AARCH64
1412 }
1413 
1414 
1415 void TemplateTable::fstore(int n) {
1416   transition(ftos, vtos);
1417 #ifdef __SOFTFP__
1418   __ str(R0_tos, faddress(n));
1419 #else
1420   __ str_float(S0_tos, faddress(n));
1421 #endif // __SOFTFP__
1422 }
1423 
1424 
1425 void TemplateTable::dstore(int n) {
1426   transition(dtos, vtos);
1427 #ifdef __SOFTFP__
1428   __ str(R0_tos_lo, laddress(n));
1429   __ str(R1_tos_hi, haddress(n));
1430 #else
1431   __ str_double(D0_tos, daddress(n));
1432 #endif // __SOFTFP__
1433 }
1434 
1435 
1436 void TemplateTable::astore(int n) {
1437   transition(vtos, vtos);
1438   __ pop_ptr(R0_tos);
1439   __ str(R0_tos, aaddress(n));
1440 }
1441 
1442 
1443 void TemplateTable::pop() {
1444   transition(vtos, vtos);
1445   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1446 }
1447 
1448 
1449 void TemplateTable::pop2() {
1450   transition(vtos, vtos);
1451   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1452 }
1453 
1454 
1455 void TemplateTable::dup() {
1456   transition(vtos, vtos);
1457   // stack: ..., a
1458   __ load_ptr(0, R0_tmp);
1459   __ push_ptr(R0_tmp);
1460   // stack: ..., a, a
1461 }
1462 
1463 
1464 void TemplateTable::dup_x1() {
1465   transition(vtos, vtos);
1466   // stack: ..., a, b
1467   __ load_ptr(0, R0_tmp);  // load b
1468   __ load_ptr(1, R2_tmp);  // load a
1469   __ store_ptr(1, R0_tmp); // store b
1470   __ store_ptr(0, R2_tmp); // store a
1471   __ push_ptr(R0_tmp);     // push b
1472   // stack: ..., b, a, b
1473 }
1474 
1475 
1476 void TemplateTable::dup_x2() {
1477   transition(vtos, vtos);
1478   // stack: ..., a, b, c
1479   __ load_ptr(0, R0_tmp);   // load c
1480   __ load_ptr(1, R2_tmp);   // load b
1481   __ load_ptr(2, R4_tmp);   // load a
1482 
1483   __ push_ptr(R0_tmp);      // push c
1484 
1485   // stack: ..., a, b, c, c
1486   __ store_ptr(1, R2_tmp);  // store b
1487   __ store_ptr(2, R4_tmp);  // store a
1488   __ store_ptr(3, R0_tmp);  // store c
1489   // stack: ..., c, a, b, c
1490 }
1491 
1492 
1493 void TemplateTable::dup2() {
1494   transition(vtos, vtos);
1495   // stack: ..., a, b
1496   __ load_ptr(1, R0_tmp);  // load a
1497   __ push_ptr(R0_tmp);     // push a
1498   __ load_ptr(1, R0_tmp);  // load b
1499   __ push_ptr(R0_tmp);     // push b
1500   // stack: ..., a, b, a, b
1501 }
1502 
1503 
1504 void TemplateTable::dup2_x1() {
1505   transition(vtos, vtos);
1506 
1507   // stack: ..., a, b, c
1508   __ load_ptr(0, R4_tmp);  // load c
1509   __ load_ptr(1, R2_tmp);  // load b
1510   __ load_ptr(2, R0_tmp);  // load a
1511 
1512   __ push_ptr(R2_tmp);     // push b
1513   __ push_ptr(R4_tmp);     // push c
1514 
1515   // stack: ..., a, b, c, b, c
1516 
1517   __ store_ptr(2, R0_tmp);  // store a
1518   __ store_ptr(3, R4_tmp);  // store c
1519   __ store_ptr(4, R2_tmp);  // store b
1520 
1521   // stack: ..., b, c, a, b, c
1522 }
1523 
1524 
1525 void TemplateTable::dup2_x2() {
1526   transition(vtos, vtos);
1527   // stack: ..., a, b, c, d
1528   __ load_ptr(0, R0_tmp);  // load d
1529   __ load_ptr(1, R2_tmp);  // load c
1530   __ push_ptr(R2_tmp);     // push c
1531   __ push_ptr(R0_tmp);     // push d
1532   // stack: ..., a, b, c, d, c, d
1533   __ load_ptr(4, R4_tmp);  // load b
1534   __ store_ptr(4, R0_tmp); // store d in b
1535   __ store_ptr(2, R4_tmp); // store b in d
1536   // stack: ..., a, d, c, b, c, d
1537   __ load_ptr(5, R4_tmp);  // load a
1538   __ store_ptr(5, R2_tmp); // store c in a
1539   __ store_ptr(3, R4_tmp); // store a in c
1540   // stack: ..., c, d, a, b, c, d
1541 }
1542 
1543 
1544 void TemplateTable::swap() {
1545   transition(vtos, vtos);
1546   // stack: ..., a, b
1547   __ load_ptr(1, R0_tmp);  // load a
1548   __ load_ptr(0, R2_tmp);  // load b
1549   __ store_ptr(0, R0_tmp); // store a in b
1550   __ store_ptr(1, R2_tmp); // store b in a
1551   // stack: ..., b, a
1552 }
1553 
1554 
1555 void TemplateTable::iop2(Operation op) {
1556   transition(itos, itos);
1557   const Register arg1 = R1_tmp;
1558   const Register arg2 = R0_tos;
1559 
1560   __ pop_i(arg1);
1561   switch (op) {
1562     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1563     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1564     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1565     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1566     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1567     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1568 #ifdef AARCH64
1569     case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
1570     case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
1571     case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
1572 #else
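    // per the JVM spec, int shifts use only the low 5 bits of the shift count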
1573     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1574     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1575     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1576 #endif // AARCH64
1577     default   : ShouldNotReachHere();
1578   }
1579 }
1580 
1581 
1582 void TemplateTable::lop2(Operation op) {
1583   transition(ltos, ltos);
1584 #ifdef AARCH64
1585   const Register arg1 = R1_tmp;
1586   const Register arg2 = R0_tos;
1587 
1588   __ pop_l(arg1);
1589   switch (op) {
1590     case add  : __ add (R0_tos, arg1, arg2); break;
1591     case sub  : __ sub (R0_tos, arg1, arg2); break;
1592     case _and : __ andr(R0_tos, arg1, arg2); break;
1593     case _or  : __ orr (R0_tos, arg1, arg2); break;
1594     case _xor : __ eor (R0_tos, arg1, arg2); break;
1595     default   : ShouldNotReachHere();
1596   }
1597 #else
1598   const Register arg1_lo = R2_tmp;
1599   const Register arg1_hi = R3_tmp;
1600   const Register arg2_lo = R0_tos_lo;
1601   const Register arg2_hi = R1_tos_hi;
1602 
1603   __ pop_l(arg1_lo, arg1_hi);
1604   switch (op) {
1605     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1606     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1607     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1608     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1609     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1610     default : ShouldNotReachHere();
1611   }
1612 #endif // AARCH64
1613 }
1614 
1615 
1616 void TemplateTable::idiv() {
1617   transition(itos, itos);
1618 #ifdef AARCH64
1619   const Register divisor = R0_tos;
1620   const Register dividend = R1_tmp;
1621 
1622   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1623   __ pop_i(dividend);
1624   __ sdiv_w(R0_tos, dividend, divisor);
1625 #else
1626   __ mov(R2, R0_tos);
1627   __ pop_i(R0);
1628   // R0 - dividend
1629   // R2 - divisor
1630   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1631   // R1 - result
1632   __ mov(R0_tos, R1);
1633 #endif // AARCH64
1634 }
1635 
1636 
1637 void TemplateTable::irem() {
1638   transition(itos, itos);
1639 #ifdef AARCH64
1640   const Register divisor = R0_tos;
1641   const Register dividend = R1_tmp;
1642   const Register quotient = R2_tmp;
1643 
1644   __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
1645   __ pop_i(dividend);
1646   __ sdiv_w(quotient, dividend, divisor);
1647   __ msub_w(R0_tos, divisor, quotient, dividend);
1648 #else
1649   __ mov(R2, R0_tos);
1650   __ pop_i(R0);
1651   // R0 - dividend
1652   // R2 - divisor
1653   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1654   // R0 - remainder
1655 #endif // AARCH64
1656 }
1657 
1658 
1659 void TemplateTable::lmul() {
1660   transition(ltos, ltos);
1661 #ifdef AARCH64
1662   const Register arg1 = R0_tos;
1663   const Register arg2 = R1_tmp;
1664 
1665   __ pop_l(arg2);
1666   __ mul(R0_tos, arg1, arg2);
1667 #else
1668   const Register arg1_lo = R0_tos_lo;
1669   const Register arg1_hi = R1_tos_hi;
1670   const Register arg2_lo = R2_tmp;
1671   const Register arg2_hi = R3_tmp;
1672 
1673   __ pop_l(arg2_lo, arg2_hi);
1674 
1675   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1676 #endif // AARCH64
1677 }
1678 
1679 
1680 void TemplateTable::ldiv() {
1681   transition(ltos, ltos);
1682 #ifdef AARCH64
1683   const Register divisor = R0_tos;
1684   const Register dividend = R1_tmp;
1685 
1686   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1687   __ pop_l(dividend);
1688   __ sdiv(R0_tos, dividend, divisor);
1689 #else
1690   const Register x_lo = R2_tmp;
1691   const Register x_hi = R3_tmp;
1692   const Register y_lo = R0_tos_lo;
1693   const Register y_hi = R1_tos_hi;
1694 
1695   __ pop_l(x_lo, x_hi);
1696 
1697   // check if y = 0
1698   __ orrs(Rtemp, y_lo, y_hi);
1699   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1700   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1701 #endif // AARCH64
1702 }
1703 
1704 
1705 void TemplateTable::lrem() {
1706   transition(ltos, ltos);
1707 #ifdef AARCH64
1708   const Register divisor = R0_tos;
1709   const Register dividend = R1_tmp;
1710   const Register quotient = R2_tmp;
1711 
1712   __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
1713   __ pop_l(dividend);
1714   __ sdiv(quotient, dividend, divisor);
1715   __ msub(R0_tos, divisor, quotient, dividend);
1716 #else
1717   const Register x_lo = R2_tmp;
1718   const Register x_hi = R3_tmp;
1719   const Register y_lo = R0_tos_lo;
1720   const Register y_hi = R1_tos_hi;
1721 
1722   __ pop_l(x_lo, x_hi);
1723 
1724   // check if y = 0
1725   __ orrs(Rtemp, y_lo, y_hi);
1726   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1727   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1728 #endif // AARCH64
1729 }
1730 
1731 
1732 void TemplateTable::lshl() {
1733   transition(itos, ltos);
1734 #ifdef AARCH64
1735   const Register val = R1_tmp;
1736   const Register shift_cnt = R0_tos;
1737   __ pop_l(val);
1738   __ lslv(R0_tos, val, shift_cnt);
1739 #else
1740   const Register shift_cnt = R4_tmp;
1741   const Register val_lo = R2_tmp;
1742   const Register val_hi = R3_tmp;
1743 
1744   __ pop_l(val_lo, val_hi);
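  // per the JVM spec, long shifts use only the low 6 bits of the shift count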
1745   __ andr(shift_cnt, R0_tos, 63);
1746   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1747 #endif // AARCH64
1748 }
1749 
1750 
1751 void TemplateTable::lshr() {
1752   transition(itos, ltos);
1753 #ifdef AARCH64
1754   const Register val = R1_tmp;
1755   const Register shift_cnt = R0_tos;
1756   __ pop_l(val);
1757   __ asrv(R0_tos, val, shift_cnt);
1758 #else
1759   const Register shift_cnt = R4_tmp;
1760   const Register val_lo = R2_tmp;
1761   const Register val_hi = R3_tmp;
1762 
1763   __ pop_l(val_lo, val_hi);
1764   __ andr(shift_cnt, R0_tos, 63);
1765   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1766 #endif // AARCH64
1767 }
1768 
1769 
1770 void TemplateTable::lushr() {
1771   transition(itos, ltos);
1772 #ifdef AARCH64
1773   const Register val = R1_tmp;
1774   const Register shift_cnt = R0_tos;
1775   __ pop_l(val);
1776   __ lsrv(R0_tos, val, shift_cnt);
1777 #else
1778   const Register shift_cnt = R4_tmp;
1779   const Register val_lo = R2_tmp;
1780   const Register val_hi = R3_tmp;
1781 
1782   __ pop_l(val_lo, val_hi);
1783   __ andr(shift_cnt, R0_tos, 63);
1784   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1785 #endif // AARCH64
1786 }
1787 
1788 
1789 void TemplateTable::fop2(Operation op) {
1790   transition(ftos, ftos);
1791 #ifdef __SOFTFP__
1792   __ mov(R1, R0_tos);
1793   __ pop_i(R0);
1794   switch (op) {
1795     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1796     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1797     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1798     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1799     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1800     default : ShouldNotReachHere();
1801   }
1802 #else
1803   const FloatRegister arg1 = S1_tmp;
1804   const FloatRegister arg2 = S0_tos;
1805 
1806   switch (op) {
1807     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1808     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1809     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1810     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1811     case rem:
1812 #ifndef __ABI_HARD__
1813       __ pop_f(arg1);
1814       __ fmrs(R0, arg1);
1815       __ fmrs(R1, arg2);
1816       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1817       __ fmsr(S0_tos, R0);
1818 #else
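           // Hard-float ABI: frem expects its arguments in S0/S1 and returns
           // the result in S0, which is already the ftos register, so no move
           // back is needed.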
1819       __ mov_float(S1_reg, arg2);
1820       __ pop_f(S0);
1821       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1822 #endif // !__ABI_HARD__
1823       break;
1824     default : ShouldNotReachHere();
1825   }
1826 #endif // __SOFTFP__
1827 }
1828 
1829 
1830 void TemplateTable::dop2(Operation op) {
1831   transition(dtos, dtos);
1832 #ifdef __SOFTFP__
1833   __ mov(R2, R0_tos_lo);
1834   __ mov(R3, R1_tos_hi);
1835   __ pop_l(R0, R1);
1836   switch (op) {
1837     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1838     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1839     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1840     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1841     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1842     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1843     default : ShouldNotReachHere();
1844   }
1845 #else
1846   const FloatRegister arg1 = D1_tmp;
1847   const FloatRegister arg2 = D0_tos;
1848 
1849   switch (op) {
1850     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1851     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1852     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1853     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1854     case rem:
1855 #ifndef __ABI_HARD__
1856       __ pop_d(arg1);
1857       __ fmrrd(R0, R1, arg1);
1858       __ fmrrd(R2, R3, arg2);
1859       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1860       __ fmdrr(D0_tos, R0, R1);
1861 #else
1862       __ mov_double(D1, arg2);
1863       __ pop_d(D0);
1864       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1865 #endif // !__ABI_HARD__
1866       break;
1867     default : ShouldNotReachHere();
1868   }
1869 #endif // __SOFTFP__
1870 }
1871 
1872 
1873 void TemplateTable::ineg() {
1874   transition(itos, itos);
1875   __ neg_32(R0_tos, R0_tos);
1876 }
1877 
1878 
1879 void TemplateTable::lneg() {
1880   transition(ltos, ltos);
1881 #ifdef AARCH64
1882   __ neg(R0_tos, R0_tos);
1883 #else
1884   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1885   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1886 #endif // AARCH64
1887 }
1888 
1889 
1890 void TemplateTable::fneg() {
1891   transition(ftos, ftos);
1892 #ifdef __SOFTFP__
1893   // Invert sign bit
1894   const int sign_mask = 0x80000000;
1895   __ eor(R0_tos, R0_tos, sign_mask);
1896 #else
1897   __ neg_float(S0_tos, S0_tos);
1898 #endif // __SOFTFP__
1899 }
1900 
1901 
1902 void TemplateTable::dneg() {
1903   transition(dtos, dtos);
1904 #ifdef __SOFTFP__
1905   // Invert sign bit in the high part of the double
1906   const int sign_mask_hi = 0x80000000;
1907   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1908 #else
1909   __ neg_double(D0_tos, D0_tos);
1910 #endif // __SOFTFP__
1911 }
1912 
1913 
1914 void TemplateTable::iinc() {
1915   transition(vtos, vtos);
1916   const Register Rconst = R2_tmp;
1917   const Register Rlocal_index = R1_tmp;
1918   const Register Rval = R0_tmp;
1919 
1920   __ ldrsb(Rconst, at_bcp(2));
1921   locals_index(Rlocal_index);
1922   Address local = load_iaddress(Rlocal_index, Rtemp);
1923   __ ldr_s32(Rval, local);
1924   __ add(Rval, Rval, Rconst);
1925   __ str_32(Rval, local);
1926 }
1927 
1928 
1929 void TemplateTable::wide_iinc() {
1930   transition(vtos, vtos);
1931   const Register Rconst = R2_tmp;
1932   const Register Rlocal_index = R1_tmp;
1933   const Register Rval = R0_tmp;
1934 
1935   // get constant in Rconst
1936   __ ldrsb(R2_tmp, at_bcp(4));
1937   __ ldrb(R3_tmp, at_bcp(5));
1938   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
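       // The increment is a signed 16-bit big-endian immediate at bcp + 4; the
       // ldrsb of the high byte preserves its sign when the bytes are merged.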
1939 
1940   locals_index_wide(Rlocal_index);
1941   Address local = load_iaddress(Rlocal_index, Rtemp);
1942   __ ldr_s32(Rval, local);
1943   __ add(Rval, Rval, Rconst);
1944   __ str_32(Rval, local);
1945 }
1946 
1947 
1948 void TemplateTable::convert() {
1949   // Checking
1950 #ifdef ASSERT
1951   { TosState tos_in  = ilgl;
1952     TosState tos_out = ilgl;
1953     switch (bytecode()) {
1954       case Bytecodes::_i2l: // fall through
1955       case Bytecodes::_i2f: // fall through
1956       case Bytecodes::_i2d: // fall through
1957       case Bytecodes::_i2b: // fall through
1958       case Bytecodes::_i2c: // fall through
1959       case Bytecodes::_i2s: tos_in = itos; break;
1960       case Bytecodes::_l2i: // fall through
1961       case Bytecodes::_l2f: // fall through
1962       case Bytecodes::_l2d: tos_in = ltos; break;
1963       case Bytecodes::_f2i: // fall through
1964       case Bytecodes::_f2l: // fall through
1965       case Bytecodes::_f2d: tos_in = ftos; break;
1966       case Bytecodes::_d2i: // fall through
1967       case Bytecodes::_d2l: // fall through
1968       case Bytecodes::_d2f: tos_in = dtos; break;
1969       default             : ShouldNotReachHere();
1970     }
1971     switch (bytecode()) {
1972       case Bytecodes::_l2i: // fall through
1973       case Bytecodes::_f2i: // fall through
1974       case Bytecodes::_d2i: // fall through
1975       case Bytecodes::_i2b: // fall through
1976       case Bytecodes::_i2c: // fall through
1977       case Bytecodes::_i2s: tos_out = itos; break;
1978       case Bytecodes::_i2l: // fall through
1979       case Bytecodes::_f2l: // fall through
1980       case Bytecodes::_d2l: tos_out = ltos; break;
1981       case Bytecodes::_i2f: // fall through
1982       case Bytecodes::_l2f: // fall through
1983       case Bytecodes::_d2f: tos_out = ftos; break;
1984       case Bytecodes::_i2d: // fall through
1985       case Bytecodes::_l2d: // fall through
1986       case Bytecodes::_f2d: tos_out = dtos; break;
1987       default             : ShouldNotReachHere();
1988     }
1989     transition(tos_in, tos_out);
1990   }
1991 #endif // ASSERT
1992 
1993   // Conversion
1994   switch (bytecode()) {
1995     case Bytecodes::_i2l:
1996 #ifdef AARCH64
1997       __ sign_extend(R0_tos, R0_tos, 32);
1998 #else
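           // Fill the high word with copies of the sign bit of the low word
           // (arithmetic shift by 31) to sign-extend the int to a long.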
1999       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
2000 #endif // AARCH64
2001       break;
2002 
2003     case Bytecodes::_i2f:
2004 #ifdef AARCH64
2005       __ scvtf_sw(S0_tos, R0_tos);
2006 #else
2007 #ifdef __SOFTFP__
2008       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
2009 #else
2010       __ fmsr(S0_tmp, R0_tos);
2011       __ fsitos(S0_tos, S0_tmp);
2012 #endif // __SOFTFP__
2013 #endif // AARCH64
2014       break;
2015 
2016     case Bytecodes::_i2d:
2017 #ifdef AARCH64
2018       __ scvtf_dw(D0_tos, R0_tos);
2019 #else
2020 #ifdef __SOFTFP__
2021       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
2022 #else
2023       __ fmsr(S0_tmp, R0_tos);
2024       __ fsitod(D0_tos, S0_tmp);
2025 #endif // __SOFTFP__
2026 #endif // AARCH64
2027       break;
2028 
2029     case Bytecodes::_i2b:
2030       __ sign_extend(R0_tos, R0_tos, 8);
2031       break;
2032 
2033     case Bytecodes::_i2c:
2034       __ zero_extend(R0_tos, R0_tos, 16);
2035       break;
2036 
2037     case Bytecodes::_i2s:
2038       __ sign_extend(R0_tos, R0_tos, 16);
2039       break;
2040 
2041     case Bytecodes::_l2i:
2042       /* nothing to do */
2043       break;
2044 
2045     case Bytecodes::_l2f:
2046 #ifdef AARCH64
2047       __ scvtf_sx(S0_tos, R0_tos);
2048 #else
2049       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
2050 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2051       __ fmsr(S0_tos, R0);
2052 #endif // !__SOFTFP__ && !__ABI_HARD__
2053 #endif // AARCH64
2054       break;
2055 
2056     case Bytecodes::_l2d:
2057 #ifdef AARCH64
2058       __ scvtf_dx(D0_tos, R0_tos);
2059 #else
2060       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
2061 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2062       __ fmdrr(D0_tos, R0, R1);
2063 #endif // !__SOFTFP__ && !__ABI_HARD__
2064 #endif // AARCH64
2065       break;
2066 
2067     case Bytecodes::_f2i:
2068 #ifdef AARCH64
2069       __ fcvtzs_ws(R0_tos, S0_tos);
2070 #else
2071 #ifndef __SOFTFP__
2072       __ ftosizs(S0_tos, S0_tos);
2073       __ fmrs(R0_tos, S0_tos);
2074 #else
2075       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
2076 #endif // !__SOFTFP__
2077 #endif // AARCH64
2078       break;
2079 
2080     case Bytecodes::_f2l:
2081 #ifdef AARCH64
2082       __ fcvtzs_xs(R0_tos, S0_tos);
2083 #else
2084 #ifndef __SOFTFP__
2085       __ fmrs(R0_tos, S0_tos);
2086 #endif // !__SOFTFP__
2087       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2088 #endif // AARCH64
2089       break;
2090 
2091     case Bytecodes::_f2d:
2092 #ifdef __SOFTFP__
2093       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2094 #else
2095       __ convert_f2d(D0_tos, S0_tos);
2096 #endif // __SOFTFP__
2097       break;
2098 
2099     case Bytecodes::_d2i:
2100 #ifdef AARCH64
2101       __ fcvtzs_wd(R0_tos, D0_tos);
2102 #else
2103 #ifndef __SOFTFP__
2104       __ ftosizd(Stemp, D0);
2105       __ fmrs(R0, Stemp);
2106 #else
2107       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2108 #endif // !__SOFTFP__
2109 #endif // AARCH64
2110       break;
2111 
2112     case Bytecodes::_d2l:
2113 #ifdef AARCH64
2114       __ fcvtzs_xd(R0_tos, D0_tos);
2115 #else
2116 #ifndef __SOFTFP__
2117       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2118 #endif // !__SOFTFP__
2119       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2120 #endif // AARCH64
2121       break;
2122 
2123     case Bytecodes::_d2f:
2124 #ifdef __SOFTFP__
2125       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2126 #else
2127       __ convert_d2f(S0_tos, D0_tos);
2128 #endif // __SOFTFP__
2129       break;
2130 
2131     default:
2132       ShouldNotReachHere();
2133   }
2134 }
2135 
2136 
2137 void TemplateTable::lcmp() {
2138   transition(ltos, itos);
2139 #ifdef AARCH64
2140   const Register arg1 = R1_tmp;
2141   const Register arg2 = R0_tos;
2142 
2143   __ pop_l(arg1);
2144 
2145   __ cmp(arg1, arg2);
2146   __ cset(R0_tos, gt);               // 1 if '>', else 0
2147   __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2148 #else
2149   const Register arg1_lo = R2_tmp;
2150   const Register arg1_hi = R3_tmp;
2151   const Register arg2_lo = R0_tos_lo;
2152   const Register arg2_hi = R1_tos_hi;
2153   const Register res = R4_tmp;
2154 
2155   __ pop_l(arg1_lo, arg1_hi);
2156 
2157   // long compare arg1 with arg2
2158   // result is -1/0/+1 if '<'/'='/'>'
2159   Label done;
2160 
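       // Compare the high halves with signed conditions; only if they are equal
       // is the result decided by an unsigned comparison of the low halves.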
2161   __ mov (res, 0);
2162   __ cmp (arg1_hi, arg2_hi);
2163   __ mvn (res, 0, lt);
2164   __ mov (res, 1, gt);
2165   __ b(done, ne);
2166   __ cmp (arg1_lo, arg2_lo);
2167   __ mvn (res, 0, lo);
2168   __ mov (res, 1, hi);
2169   __ bind(done);
2170   __ mov (R0_tos, res);
2171 #endif // AARCH64
2172 }
2173 
2174 
2175 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2176   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2177 
2178 #ifdef AARCH64
2179   if (is_float) {
2180     transition(ftos, itos);
2181     __ pop_f(S1_tmp);
2182     __ fcmp_s(S1_tmp, S0_tos);
2183   } else {
2184     transition(dtos, itos);
2185     __ pop_d(D1_tmp);
2186     __ fcmp_d(D1_tmp, D0_tos);
2187   }
2188 
2189   if (unordered_result < 0) {
2190     __ cset(R0_tos, gt);               // 1 if '>', else 0
2191     __ csinv(R0_tos, R0_tos, ZR, ge);  // previous value if '>=', else -1
2192   } else {
2193     __ cset(R0_tos, hi);               // 1 if '>' or unordered, else 0
2194     __ csinv(R0_tos, R0_tos, ZR, pl);  // previous value if '>=' or unordered, else -1
2195   }
2196 
2197 #else
2198 
2199 #ifdef __SOFTFP__
2200 
2201   if (is_float) {
2202     transition(ftos, itos);
2203     const Register Rx = R0;
2204     const Register Ry = R1;
2205 
2206     __ mov(Ry, R0_tos);
2207     __ pop_i(Rx);
2208 
2209     if (unordered_result == 1) {
2210       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2211     } else {
2212       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2213     }
2214 
2215   } else {
2216 
2217     transition(dtos, itos);
2218     const Register Rx_lo = R0;
2219     const Register Rx_hi = R1;
2220     const Register Ry_lo = R2;
2221     const Register Ry_hi = R3;
2222 
2223     __ mov(Ry_lo, R0_tos_lo);
2224     __ mov(Ry_hi, R1_tos_hi);
2225     __ pop_l(Rx_lo, Rx_hi);
2226 
2227     if (unordered_result == 1) {
2228       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2229     } else {
2230       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2231     }
2232   }
2233 
2234 #else
2235 
2236   if (is_float) {
2237     transition(ftos, itos);
2238     __ pop_f(S1_tmp);
2239     __ fcmps(S1_tmp, S0_tos);
2240   } else {
2241     transition(dtos, itos);
2242     __ pop_d(D1_tmp);
2243     __ fcmpd(D1_tmp, D0_tos);
2244   }
2245 
2246   __ fmstat();
2247 
2248   // comparison result | flag N | flag Z | flag C | flag V
2249   // "<"               |   1    |   0    |   0    |   0
2250   // "=="              |   0    |   1    |   1    |   0
2251   // ">"               |   0    |   0    |   1    |   0
2252   // unordered         |   0    |   0    |   1    |   1
2253 
2254   if (unordered_result < 0) {
2255     __ mov(R0_tos, 1);           // result ==  1 if greater
2256     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2257   } else {
2258     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2259     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2260   }
2261   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2262 #endif // __SOFTFP__
2263 #endif // AARCH64
2264 }
2265 
2266 
2267 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2268 
2269   const Register Rdisp = R0_tmp;
2270   const Register Rbumped_taken_count = R5_tmp;
2271 
2272   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2273 
2274   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2275                              InvocationCounter::counter_offset();
2276   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2277                               InvocationCounter::counter_offset();
2278   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2279 
2280   // Load up R0 with the branch displacement
2281   if (is_wide) {
2282     __ ldrsb(R0_tmp, at_bcp(1));
2283     __ ldrb(R1_tmp, at_bcp(2));
2284     __ ldrb(R2_tmp, at_bcp(3));
2285     __ ldrb(R3_tmp, at_bcp(4));
2286     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2287     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2288     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2289   } else {
2290     __ ldrsb(R0_tmp, at_bcp(1));
2291     __ ldrb(R1_tmp, at_bcp(2));
2292     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2293   }
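       // Bytecode operands are big-endian, so the displacement bytes are merged
       // most-significant first; the initial ldrsb keeps the sign of the offset.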
2294 
2295   // Handle all the JSR stuff here, then exit.
2296   // It's much shorter and cleaner than intermingling with the
2297   // non-JSR normal-branch stuff occurring below.
2298   if (is_jsr) {
2299     // compute return address as bci in R1
2300     const Register Rret_addr = R1_tmp;
2301     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2302 
2303     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2304     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2305     __ sub(Rret_addr, Rret_addr, Rtemp);
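         // Rret_addr now holds the bci of the bytecode following this jsr
         // (next bcp minus the start of the code array); ret uses it later to
         // rebuild the bcp.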
2306 
2307     // Load the next target bytecode into R3_bytecode and advance Rbcp
2308 #ifdef AARCH64
2309     __ add(Rbcp, Rbcp, Rdisp);
2310     __ ldrb(R3_bytecode, Address(Rbcp));
2311 #else
2312     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2313 #endif // AARCH64
2314 
2315     // Push return address
2316     __ push_i(Rret_addr);
2317     // jsr returns vtos
2318     __ dispatch_only_noverify(vtos);
2319     return;
2320   }
2321 
2322   // Normal (non-jsr) branch handling
2323 
2324   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2325 #ifdef AARCH64
2326   __ add(Rbcp, Rbcp, Rdisp);
2327   __ ldrb(R3_bytecode, Address(Rbcp));
2328 #else
2329   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2330 #endif // AARCH64
2331 
2332   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2333   Label backedge_counter_overflow;
2334   Label profile_method;
2335   Label dispatch;
2336 
2337   if (UseLoopCounter) {
2338     // increment backedge counter for backward branches
2339     // Rdisp (R0): target offset
2340 
2341     const Register Rcnt = R2_tmp;
2342     const Register Rcounters = R1_tmp;
2343 
2344     // count only if backward branch
2345 #ifdef AARCH64
2346     __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2347 #else
2348     __ tst(Rdisp, Rdisp);
2349     __ b(dispatch, pl);
2350 #endif // AARCH64
2351 
2352     if (TieredCompilation) {
2353       Label no_mdo;
2354       int increment = InvocationCounter::count_increment;
2355       if (ProfileInterpreter) {
2356         // Are we profiling?
2357         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2358         __ cbz(Rtemp, no_mdo);
2359         // Increment the MDO backedge counter
2360         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2361                                                   in_bytes(InvocationCounter::counter_offset()));
2362         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2363         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2364                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2365         __ b(dispatch);
2366       }
2367       __ bind(no_mdo);
2368       // Increment backedge counter in MethodCounters*
2369       // Note Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on ARM64
2370       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2371                              Rdisp, R3_bytecode,
2372                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2373       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2374       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2375                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2376     } else {
2377       // Increment backedge counter in MethodCounters*
2378       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2379                              Rdisp, R3_bytecode,
2380                              AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2381       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2382       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2383       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2384 
2385       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2386 #ifdef AARCH64
2387       __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value);  // and the status bits
2388 #else
2389       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2390 #endif // AARCH64
2391       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2392 
2393       if (ProfileInterpreter) {
2394         // Test to see if we should create a method data oop
2395         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2396         __ ldr_s32(Rtemp, profile_limit);
2397         __ cmp_32(Rcnt, Rtemp);
2398         __ b(dispatch, lt);
2399 
2400         // if no method data exists, go to profile method
2401         __ test_method_data_pointer(R4_tmp, profile_method);
2402 
2403         if (UseOnStackReplacement) {
2404           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2405           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2406           __ ldr_s32(Rtemp, backward_branch_limit);
2407           __ cmp(Rbumped_taken_count, Rtemp);
2408           __ b(dispatch, lo);
2409 
2410           // When ProfileInterpreter is on, the backedge_count comes from the
2411           // MethodData*, which value does not get reset on the call to
2412           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2413           // routine while the method is being compiled, add a second test to make
2414           // sure the overflow function is called only once every overflow_frequency.
2415           const int overflow_frequency = 1024;
2416 
2417 #ifdef AARCH64
2418           __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2419 #else
2420           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2421           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
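               // Shifting left by 32 - 10 = 22 sets the Z flag exactly when the
               // low 10 bits are zero, without needing a register for the mask.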
2422           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2423 #endif // AARCH64
2424 
2425           __ b(backedge_counter_overflow, eq);
2426         }
2427       } else {
2428         if (UseOnStackReplacement) {
2429           // check for overflow against Rcnt, which is the sum of the counters
2430           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2431           __ ldr_s32(Rtemp, backward_branch_limit);
2432           __ cmp_32(Rcnt, Rtemp);
2433           __ b(backedge_counter_overflow, hs);
2434 
2435         }
2436       }
2437     }
2438     __ bind(dispatch);
2439   }
2440 
2441   if (!UseOnStackReplacement) {
2442     __ bind(backedge_counter_overflow);
2443   }
2444 
2445   // continue with the bytecode @ target
2446   __ dispatch_only(vtos);
2447 
2448   if (UseLoopCounter) {
2449     if (ProfileInterpreter) {
2450       // Out-of-line code to allocate method data oop.
2451       __ bind(profile_method);
2452 
2453       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2454       __ set_method_data_pointer_for_bcp();
2455       // reload next bytecode
2456       __ ldrb(R3_bytecode, Address(Rbcp));
2457       __ b(dispatch);
2458     }
2459 
2460     if (UseOnStackReplacement) {
2461       // invocation counter overflow
2462       __ bind(backedge_counter_overflow);
2463 
2464       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2465       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2466 
2467       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2468       const Register Rnmethod = R0;
2469 
2470       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2471 
2472       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2473 
2474       // nmethod may have been invalidated (VM may block upon call_VM return)
2475       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2476       __ cmp(R1_tmp, nmethod::in_use);
2477       __ b(dispatch, ne);
2478 
2479       // We have the address of an on stack replacement routine in Rnmethod,
2480       // We need to prepare to execute the OSR method. First we must
2481       // migrate the locals and monitors off of the stack.
2482 
2483       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2484 
2485       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2486 
2487       // R0 is OSR buffer
2488 
2489       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2490       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2491 
2492 #ifdef AARCH64
2493       __ ldp(FP, LR, Address(FP));
2494       __ mov(SP, Rtemp);
2495 #else
2496       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2497       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2498 #endif // AARCH64
2499 
2500       __ jump(R1_tmp);
2501     }
2502   }
2503 }
2504 
2505 
2506 void TemplateTable::if_0cmp(Condition cc) {
2507   transition(itos, vtos);
2508   // assume branch is more often taken than not (loops use backward branches)
2509   Label not_taken;
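       // On AArch64, eq/ne against zero is handled with a single
       // compare-and-branch (cbz/cbnz); other conditions fall back to an
       // explicit compare against zero.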
2510 #ifdef AARCH64
2511   if (cc == equal) {
2512     __ cbnz_w(R0_tos, not_taken);
2513   } else if (cc == not_equal) {
2514     __ cbz_w(R0_tos, not_taken);
2515   } else {
2516     __ cmp_32(R0_tos, 0);
2517     __ b(not_taken, convNegCond(cc));
2518   }
2519 #else
2520   __ cmp_32(R0_tos, 0);
2521   __ b(not_taken, convNegCond(cc));
2522 #endif // AARCH64
2523   branch(false, false);
2524   __ bind(not_taken);
2525   __ profile_not_taken_branch(R0_tmp);
2526 }
2527 
2528 
2529 void TemplateTable::if_icmp(Condition cc) {
2530   transition(itos, vtos);
2531   // assume branch is more often taken than not (loops use backward branches)
2532   Label not_taken;
2533   __ pop_i(R1_tmp);
2534   __ cmp_32(R1_tmp, R0_tos);
2535   __ b(not_taken, convNegCond(cc));
2536   branch(false, false);
2537   __ bind(not_taken);
2538   __ profile_not_taken_branch(R0_tmp);
2539 }
2540 
2541 
2542 void TemplateTable::if_nullcmp(Condition cc) {
2543   transition(atos, vtos);
2544   assert(cc == equal || cc == not_equal, "invalid condition");
2545 
2546   // assume branch is more often taken than not (loops use backward branches)
2547   Label not_taken;
2548   if (cc == equal) {
2549     __ cbnz(R0_tos, not_taken);
2550   } else {
2551     __ cbz(R0_tos, not_taken);
2552   }
2553   branch(false, false);
2554   __ bind(not_taken);
2555   __ profile_not_taken_branch(R0_tmp);
2556 }
2557 
2558 
2559 void TemplateTable::if_acmp(Condition cc) {
2560   transition(atos, vtos);
2561   // assume branch is more often taken than not (loops use backward branches)
2562   Label not_taken;
2563   __ pop_ptr(R1_tmp);
2564   __ cmp(R1_tmp, R0_tos);
2565   __ b(not_taken, convNegCond(cc));
2566   branch(false, false);
2567   __ bind(not_taken);
2568   __ profile_not_taken_branch(R0_tmp);
2569 }
2570 
2571 
2572 void TemplateTable::ret() {
2573   transition(vtos, vtos);
2574   const Register Rlocal_index = R1_tmp;
2575   const Register Rret_bci = Rtmp_save0; // R4/R19
2576 
2577   locals_index(Rlocal_index);
2578   Address local = load_iaddress(Rlocal_index, Rtemp);
2579   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2580   __ profile_ret(Rtmp_save1, Rret_bci);
2581   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2582   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2583   __ add(Rbcp, Rtemp, Rret_bci);
2584   __ dispatch_next(vtos);
2585 }
2586 
2587 
2588 void TemplateTable::wide_ret() {
2589   transition(vtos, vtos);
2590   const Register Rlocal_index = R1_tmp;
2591   const Register Rret_bci = Rtmp_save0; // R4/R19
2592 
2593   locals_index_wide(Rlocal_index);
2594   Address local = load_iaddress(Rlocal_index, Rtemp);
2595   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2596   __ profile_ret(Rtmp_save1, Rret_bci);
2597   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2598   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2599   __ add(Rbcp, Rtemp, Rret_bci);
2600   __ dispatch_next(vtos);
2601 }
2602 
2603 
2604 void TemplateTable::tableswitch() {
2605   transition(itos, vtos);
2606 
2607   const Register Rindex  = R0_tos;
2608 #ifndef AARCH64
2609   const Register Rtemp2  = R1_tmp;
2610 #endif // !AARCH64
2611   const Register Rabcp   = R2_tmp;  // aligned bcp
2612   const Register Rlow    = R3_tmp;
2613   const Register Rhigh   = R4_tmp;
2614   const Register Roffset = R5_tmp;
2615 
2616   // align bcp
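       // Skip the alignment padding and the 32-bit default offset so that Rabcp
       // points at the low bound; the post-incremented load below then leaves it
       // at the start of the jump-offset table.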
2617   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2618   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2619 
2620   // load lo & hi
2621 #ifdef AARCH64
2622   __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2623 #else
2624   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2625 #endif // AARCH64
2626   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2627   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2628 
2629   // compare index with high bound
2630   __ cmp_32(Rhigh, Rindex);
2631 
2632 #ifdef AARCH64
2633   Label default_case, do_dispatch;
2634   __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2635   __ b(default_case, lt);
2636 
2637   __ sub_w(Rindex, Rindex, Rlow);
2638   __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2639   if(ProfileInterpreter) {
2640     __ sxtw(Rindex, Rindex);
2641     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2642   }
2643   __ b(do_dispatch);
2644 
2645   __ bind(default_case);
2646   __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2647   if(ProfileInterpreter) {
2648     __ profile_switch_default(R0_tmp);
2649   }
2650 
2651   __ bind(do_dispatch);
2652 #else
2653 
2654   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2655   __ subs(Rindex, Rindex, Rlow, ge);
2656 
2657   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2658   // ("ge" status accumulated from cmp and subs instructions) then load
2659   // offset from table, otherwise load offset for default case
2660 
2661   if(ProfileInterpreter) {
2662     Label default_case, continue_execution;
2663 
2664     __ b(default_case, lt);
2665     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2666     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2667     __ b(continue_execution);
2668 
2669     __ bind(default_case);
2670     __ profile_switch_default(R0_tmp);
2671     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2672 
2673     __ bind(continue_execution);
2674   } else {
2675     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2676     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2677   }
2678 #endif // AARCH64
2679 
2680   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2681 
2682   // load the next bytecode to R3_bytecode and advance Rbcp
2683 #ifdef AARCH64
2684   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2685   __ ldrb(R3_bytecode, Address(Rbcp));
2686 #else
2687   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2688 #endif // AARCH64
2689   __ dispatch_only(vtos);
2690 
2691 }
2692 
2693 
2694 void TemplateTable::lookupswitch() {
2695   transition(itos, itos);
2696   __ stop("lookupswitch bytecode should have been rewritten");
2697 }
2698 
2699 
2700 void TemplateTable::fast_linearswitch() {
2701   transition(itos, vtos);
2702   Label loop, found, default_case, continue_execution;
2703 
2704   const Register Rkey     = R0_tos;
2705   const Register Rabcp    = R2_tmp;  // aligned bcp
2706   const Register Rdefault = R3_tmp;
2707   const Register Rcount   = R4_tmp;
2708   const Register Roffset  = R5_tmp;
2709 
2710   // bswap Rkey, so we can avoid bswapping the table entries
2711   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2712 
2713   // align bcp
2714   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2715   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2716 
2717   // load default & counter
2718 #ifdef AARCH64
2719   __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2720 #else
2721   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2722 #endif // AARCH64
2723   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2724 
2725 #ifdef AARCH64
2726   __ cbz_w(Rcount, default_case);
2727 #else
2728   __ cmp_32(Rcount, 0);
2729   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2730   __ b(default_case, eq);
2731 #endif // AARCH64
2732 
2733   // table search
2734   __ bind(loop);
2735 #ifdef AARCH64
2736   __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2737 #endif // AARCH64
2738   __ cmp_32(Rtemp, Rkey);
2739   __ b(found, eq);
2740   __ subs(Rcount, Rcount, 1);
2741 #ifndef AARCH64
2742   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2743 #endif // !AARCH64
2744   __ b(loop, ne);
2745 
2746   // default case
2747   __ bind(default_case);
2748   __ profile_switch_default(R0_tmp);
2749   __ mov(Roffset, Rdefault);
2750   __ b(continue_execution);
2751 
2752   // entry found -> get offset
2753   __ bind(found);
2754   // Rabcp is already incremented and points to the next entry
2755   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2756   if (ProfileInterpreter) {
2757     // Calculate index of the selected case.
2758     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2759 
2760     // align bcp
2761     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2762     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2763 
2764     // load number of cases
2765     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2766     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2767 
2768     // Selected index = <number of cases> - <current loop count>
2769     __ sub(R1_tmp, R2_tmp, Rcount);
2770     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2771   }
2772 
2773   // continue execution
2774   __ bind(continue_execution);
2775   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2776 
2777   // load the next bytecode to R3_bytecode and advance Rbcp
2778 #ifdef AARCH64
2779   __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2780   __ ldrb(R3_bytecode, Address(Rbcp));
2781 #else
2782   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2783 #endif // AARCH64
2784   __ dispatch_only(vtos);
2785 }
2786 
2787 
2788 void TemplateTable::fast_binaryswitch() {
2789   transition(itos, vtos);
2790   // Implementation using the following core algorithm:
2791   //
2792   // int binary_search(int key, LookupswitchPair* array, int n) {
2793   //   // Binary search according to "Methodik des Programmierens" by
2794   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2795   //   int i = 0;
2796   //   int j = n;
2797   //   while (i+1 < j) {
2798   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2799   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2800   //     // where a stands for the array and assuming that the (nonexistent)
2801   //     // element a[n] is infinitely big.
2802   //     int h = (i + j) >> 1;
2803   //     // i < h < j
2804   //     if (key < array[h].fast_match()) {
2805   //       j = h;
2806   //     } else {
2807   //       i = h;
2808   //     }
2809   //   }
2810   //   // R: a[i] <= key < a[i+1] or Q
2811   //   // (i.e., if key is within array, i is the correct index)
2812   //   return i;
2813   // }
2814 
2815   // register allocation
2816   const Register key    = R0_tos;                // already set (tosca)
2817   const Register array  = R1_tmp;
2818   const Register i      = R2_tmp;
2819   const Register j      = R3_tmp;
2820   const Register h      = R4_tmp;
2821   const Register val    = R5_tmp;
2822   const Register temp1  = Rtemp;
2823   const Register temp2  = LR_tmp;
2824   const Register offset = R3_tmp;
2825 
2826   // set 'array' = aligned bcp + 2 ints
2827   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2828   __ align_reg(array, temp1, BytesPerInt);
2829 
2830   // initialize i & j
2831   __ mov(i, 0);                                  // i = 0;
2832   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2833   // Convert j into native byte ordering
2834   __ byteswap_u32(j, temp1, temp2);
2835 
2836   // and start
2837   Label entry;
2838   __ b(entry);
2839 
2840   // binary search loop
2841   { Label loop;
2842     __ bind(loop);
2843     // int h = (i + j) >> 1;
2844     __ add(h, i, j);                             // h = i + j;
2845     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2846     // if (key < array[h].fast_match()) {
2847     //   j = h;
2848     // } else {
2849     //   i = h;
2850     // }
2851 #ifdef AARCH64
2852     __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2853     __ ldr_s32(val, Address(temp1));
2854 #else
2855     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2856 #endif // AARCH64
2857     // Convert array[h].match to native byte-ordering before compare
2858     __ byteswap_u32(val, temp1, temp2);
2859     __ cmp_32(key, val);
2860     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2861     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2862     // while (i+1 < j)
2863     __ bind(entry);
2864     __ add(temp1, i, 1);                             // i+1
2865     __ cmp(temp1, j);                                // i+1 < j
2866     __ b(loop, lt);
2867   }
2868 
2869   // end of binary search, result index is i (must check again!)
2870   Label default_case;
2871   // Convert array[i].match to native byte-ordering before compare
2872 #ifdef AARCH64
2873   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2874   __ ldr_s32(val, Address(temp1));
2875 #else
2876   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2877 #endif // AARCH64
2878   __ byteswap_u32(val, temp1, temp2);
2879   __ cmp_32(key, val);
2880   __ b(default_case, ne);
2881 
2882   // entry found
2883   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2884   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2885   __ profile_switch_case(R0, i, R1, i);
2886   __ byteswap_u32(offset, temp1, temp2);
2887 #ifdef AARCH64
2888   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2889   __ ldrb(R3_bytecode, Address(Rbcp));
2890 #else
2891   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2892 #endif // AARCH64
2893   __ dispatch_only(vtos);
2894 
2895   // default case
2896   __ bind(default_case);
2897   __ profile_switch_default(R0);
2898   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2899   __ byteswap_u32(offset, temp1, temp2);
2900 #ifdef AARCH64
2901   __ add(Rbcp, Rbcp, offset, ex_sxtw);
2902   __ ldrb(R3_bytecode, Address(Rbcp));
2903 #else
2904   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2905 #endif // AARCH64
2906   __ dispatch_only(vtos);
2907 }
2908 
2909 
2910 void TemplateTable::_return(TosState state) {
2911   transition(state, state);
2912   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2913 
2914   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2915     Label skip_register_finalizer;
2916     assert(state == vtos, "only valid state");
2917     __ ldr(R1, aaddress(0));
2918     __ load_klass(Rtemp, R1);
2919     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2920     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2921 
2922     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2923 
2924     __ bind(skip_register_finalizer);
2925   }
2926 
2927   // Narrow result if state is itos but result type is smaller.
2928   // Need to narrow in the return bytecode rather than in generate_return_entry
2929   // since compiled code callers expect the result to already be narrowed.
2930   if (state == itos) {
2931     __ narrow(R0_tos);
2932   }
2933   __ remove_activation(state, LR);
2934 
2935   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2936 
2937 #ifndef AARCH64
2938   // According to interpreter calling conventions, result is returned in R0/R1,
2939   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2940   // This conversion should be done after remove_activation, as it uses
2941   // push(state) & pop(state) to preserve return value.
2942   __ convert_tos_to_retval(state);
2943 #endif // !AARCH64
2944 
2945   __ ret();
2946 
2947   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2948   __ nop();
2949 }
2950 
2951 
2952 // ----------------------------------------------------------------------------
2953 // Volatile variables demand their effects be made known to all CPUs in
2954 // order.  Store buffers on most chips allow reads & writes to reorder; the
2955 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2956 // memory barrier (i.e., it's not sufficient that the interpreter does not
2957 // reorder volatile references, the hardware also must not reorder them).
2958 //
2959 // According to the new Java Memory Model (JMM):
2960 // (1) All volatiles are serialized wrt to each other.
2961 // ALSO reads & writes act as acquire & release, so:
2962 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2963 // the read float up to before the read.  It's OK for non-volatile memory refs
2964 // that happen before the volatile read to float down below it.
2965 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2966 // that happen BEFORE the write float down to after the write.  It's OK for
2967 // non-volatile memory refs that happen after the volatile write to float up
2968 // before it.
2969 //
2970 // We only put in barriers around volatile refs (they are expensive), not
2971 // _between_ memory refs (that would require us to track the flavor of the
2972 // previous memory refs).  Requirements (2) and (3) require some barriers
2973 // before volatile stores and after volatile loads.  These nearly cover
2974 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2975 // case is placed after volatile-stores although it could just as well go
2976 // before volatile-loads.
2977 // TODO-AARCH64: consider removing extra unused parameters
2978 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2979                                      Register tmp,
2980                                      bool preserve_flags,
2981                                      Register load_tgt) {
2982 #ifdef AARCH64
2983   __ membar(order_constraint);
2984 #else
2985   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2986 #endif
2987 }
2988 
2989 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2990 void TemplateTable::resolve_cache_and_index(int byte_no,
2991                                             Register Rcache,
2992                                             Register Rindex,
2993                                             size_t index_size) {
2994   assert_different_registers(Rcache, Rindex, Rtemp);
2995 
2996   Label resolved;
2997   Bytecodes::Code code = bytecode();
2998   switch (code) {
2999   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
3000   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
3001   }
3002 
3003   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
3004   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
3005   __ cmp(Rtemp, code);  // have we resolved this bytecode?
3006   __ b(resolved, eq);
3007 
3008   // resolve first time through
3009   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
3010   __ mov(R1, code);
3011   __ call_VM(noreg, entry, R1);
3012   // Update registers with resolved info
3013   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
3014   __ bind(resolved);
3015 }
3016 
3017 
3018 // The Rcache and Rindex registers must be set before call
3019 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
3020                                               Register Rindex,
3021                                               Register Roffset,
3022                                               Register Rflags,
3023                                               Register Robj,
3024                                               bool is_static = false) {
3025 
3026   assert_different_registers(Rcache, Rindex, Rtemp);
3027   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
3028 
3029   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3030 
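       // Locate the resolved constant pool cache entry; the field offset (f2),
       // the flags word and, for static fields, the holder's java mirror
       // (via f1) are then loaded from fixed offsets within the entry.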
3031   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3032 
3033   // Field offset
3034   __ ldr(Roffset, Address(Rtemp,
3035            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
3036 
3037   // Flags
3038   __ ldr_u32(Rflags, Address(Rtemp,
3039            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3040 
3041   if (is_static) {
3042     __ ldr(Robj, Address(Rtemp,
3043              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
3044     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3045     __ ldr(Robj, Address(Robj, mirror_offset));
3046     __ resolve_oop_handle(Robj);
3047   }
3048 }
3049 
3050 
3051 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
3052 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
3053                                                Register method,
3054                                                Register itable_index,
3055                                                Register flags,
3056                                                bool is_invokevirtual,
3057                                                bool is_invokevfinal/*unused*/,
3058                                                bool is_invokedynamic) {
3059   // setup registers
3060   const Register cache = R2_tmp;
3061   const Register index = R3_tmp;
3062   const Register temp_reg = Rtemp;
3063   assert_different_registers(cache, index, temp_reg);
3064   assert_different_registers(method, itable_index, temp_reg);
3065 
3066   // determine constant pool cache field offsets
3067   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
3068   const int method_offset = in_bytes(
3069     ConstantPoolCache::base_offset() +
3070       ((byte_no == f2_byte)
3071        ? ConstantPoolCacheEntry::f2_offset()
3072        : ConstantPoolCacheEntry::f1_offset()
3073       )
3074     );
3075   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
3076                                     ConstantPoolCacheEntry::flags_offset());
3077   // access constant pool cache fields
3078   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
3079                                     ConstantPoolCacheEntry::f2_offset());
3080 
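       // invokedynamic call sites are referenced with a 4-byte constant pool
       // cache index; all other invoke bytecodes use the usual 2-byte index.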
3081   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3082   resolve_cache_and_index(byte_no, cache, index, index_size);
3083     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3084     __ ldr(method, Address(temp_reg, method_offset));
3085 
3086   if (itable_index != noreg) {
3087     __ ldr(itable_index, Address(temp_reg, index_offset));
3088   }
3089   __ ldr_u32(flags, Address(temp_reg, flags_offset));
3090 }
3091 
3092 
3093 // The registers cache and index expected to be set before call, and should not be Rtemp.
3094 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3095 // except cache and index registers which are preserved.
3096 void TemplateTable::jvmti_post_field_access(Register Rcache,
3097                                             Register Rindex,
3098                                             bool is_static,
3099                                             bool has_tos) {
3100   assert_different_registers(Rcache, Rindex, Rtemp);
3101 
3102   if (__ can_post_field_access()) {
3103     // Check to see if a field access watch has been set before we take
3104     // the time to call into the VM.
3105 
3106     Label Lcontinue;
3107 
3108     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3109     __ cbz(Rtemp, Lcontinue);
3110 
3111     // cache entry pointer
3112     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3113     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3114     if (is_static) {
3115       __ mov(R1, 0);        // NULL object reference
3116     } else {
3117       __ pop(atos);         // Get the object
3118       __ mov(R1, R0_tos);
3119       __ verify_oop(R1);
3120       __ push(atos);        // Restore stack state
3121     }
3122     // R1: object pointer or NULL
3123     // R2: cache entry pointer
3124     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3125                R1, R2);
3126     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3127 
3128     __ bind(Lcontinue);
3129   }
3130 }
3131 
3132 
3133 void TemplateTable::pop_and_check_object(Register r) {
3134   __ pop_ptr(r);
3135   __ null_check(r, Rtemp);  // for field access must check obj.
3136   __ verify_oop(r);
3137 }
3138 
3139 
3140 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3141   transition(vtos, vtos);
3142 
3143   const Register Roffset  = R2_tmp;
3144   const Register Robj     = R3_tmp;
3145   const Register Rcache   = R4_tmp;
3146   const Register Rflagsav = Rtmp_save0;  // R4/R19
3147   const Register Rindex   = R5_tmp;
3148   const Register Rflags   = R5_tmp;
3149 
3150   const bool gen_volatile_check = os::is_MP();
3151 
3152   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3153   jvmti_post_field_access(Rcache, Rindex, is_static, false);
3154   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3155 
3156   if (gen_volatile_check) {
3157     __ mov(Rflagsav, Rflags);
3158   }
3159 
3160   if (!is_static) pop_and_check_object(Robj);
3161 
3162   Label Done, Lint, Ltable, shouldNotReachHere;
3163   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3164 
3165   // compute type
3166   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3167   // Make sure we don't need to mask flags after the above shift
3168   ConstantPoolCacheEntry::verify_tos_state_shift();
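       // Rflags now holds the field's tos state in its low bits; it is used
       // below to index the per-type dispatch table.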
3169 
3170   // There are actually two versions of implementation of getfield/getstatic:
3171   //
3172   // 32-bit ARM:
3173   // 1) Table switch using add(PC,...) instruction (fast_version)
3174   // 2) Table switch using ldr(PC,...) instruction
3175   //
3176   // AArch64:
3177   // 1) Table switch using adr/add/br instructions (fast_version)
3178   // 2) Table switch using adr/ldr/br instructions
3179   //
3180   // The first version requires a fixed-size code block for each case
3181   // and cannot be used when RewriteBytecodes or VerifyOops
3182   // is enabled.
3183 
3184   // Size of fixed size code block for fast_version
3185   const int log_max_block_size = 2;
3186   const int max_block_size = 1 << log_max_block_size;
3187 
3188   // Decide if fast version is enabled
3189   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3190 
3191   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3192   // atos requires additional processing in slow version.
3193   // On AArch64 atos and itos cannot be merged.
3194   bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3195 
3196   assert(number_of_states == 10, "number of tos states should be equal to 10");
3197 
3198   __ cmp(Rflags, itos);
3199 #ifdef AARCH64
3200   __ b(Lint, eq);
3201 
3202   if(fast_version) {
3203     __ adr(Rtemp, Lbtos);
3204     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3205     __ br(Rtemp);
3206   } else {
3207     __ adr(Rtemp, Ltable);
3208     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3209     __ br(Rtemp);
3210   }
3211 #else
3212   if(atos_merged_with_itos) {
3213     __ cmp(Rflags, atos, ne);
3214   }
3215 
3216   // table switch by type
3217   if(fast_version) {
3218     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3219   } else {
3220     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3221   }
3222 
3223   // jump to itos/atos case
3224   __ b(Lint);
3225 #endif // AARCH64
3226 
3227   // table with addresses for slow version
3228   if (fast_version) {
3229     // nothing to do
3230   } else  {
3231     AARCH64_ONLY(__ align(wordSize));
3232     __ bind(Ltable);
3233     __ emit_address(Lbtos);
3234     __ emit_address(Lztos);
3235     __ emit_address(Lctos);
3236     __ emit_address(Lstos);
3237     __ emit_address(Litos);
3238     __ emit_address(Lltos);
3239     __ emit_address(Lftos);
3240     __ emit_address(Ldtos);
3241     __ emit_address(Latos);
3242   }
3243 
3244 #ifdef ASSERT
3245   int seq = 0;
3246 #endif
3247   // btos
3248   {
3249     assert(btos == seq++, "btos has unexpected value");
3250     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3251     __ bind(Lbtos);
3252     __ ldrsb(R0_tos, Address(Robj, Roffset));
3253     __ push(btos);
3254     // Rewrite bytecode to be faster
3255     if (!is_static && rc == may_rewrite) {
3256       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3257     }
3258     __ b(Done);
3259   }
3260 
3261   // ztos (same as btos for getfield)
3262   {
3263     assert(ztos == seq++, "ztos has unexpected value");
3264     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3265     __ bind(Lztos);
3266     __ ldrsb(R0_tos, Address(Robj, Roffset));
3267     __ push(ztos);
3268     // Rewrite bytecode to be faster (use btos fast getfield)
3269     if (!is_static && rc == may_rewrite) {
3270       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3271     }
3272     __ b(Done);
3273   }
3274 
3275   // ctos
3276   {
3277     assert(ctos == seq++, "ctos has unexpected value");
3278     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3279     __ bind(Lctos);
3280     __ ldrh(R0_tos, Address(Robj, Roffset));
3281     __ push(ctos);
3282     if (!is_static && rc == may_rewrite) {
3283       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3284     }
3285     __ b(Done);
3286   }
3287 
3288   // stos
3289   {
3290     assert(stos == seq++, "stos has unexpected value");
3291     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3292     __ bind(Lstos);
3293     __ ldrsh(R0_tos, Address(Robj, Roffset));
3294     __ push(stos);
3295     if (!is_static && rc == may_rewrite) {
3296       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3297     }
3298     __ b(Done);
3299   }
3300 
3301   // itos
3302   {
3303     assert(itos == seq++, "itos has unexpected value");
3304     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3305     __ bind(Litos);
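    // itos was already dispatched to Lint by the compare before the table switch,
    // so this slot should never be reached.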
3306     __ b(shouldNotReachHere);
3307   }
3308 
3309   // ltos
3310   {
3311     assert(ltos == seq++, "ltos has unexpected value");
3312     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3313     __ bind(Lltos);
3314 #ifdef AARCH64
3315     __ ldr(R0_tos, Address(Robj, Roffset));
3316 #else
3317     __ add(Roffset, Robj, Roffset);
3318     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3319 #endif // AARCH64
3320     __ push(ltos);
3321     if (!is_static && rc == may_rewrite) {
3322       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3323     }
3324     __ b(Done);
3325   }
3326 
3327   // ftos
3328   {
3329     assert(ftos == seq++, "ftos has unexpected value");
3330     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3331     __ bind(Lftos);
3332     // floats and ints are placed on the stack in the same way, so
3333     // we can use push(itos) to transfer the value without using VFP
3334     __ ldr_u32(R0_tos, Address(Robj, Roffset));
3335     __ push(itos);
3336     if (!is_static && rc == may_rewrite) {
3337       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3338     }
3339     __ b(Done);
3340   }
3341 
3342   // dtos
3343   {
3344     assert(dtos == seq++, "dtos has unexpected value");
3345     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3346     __ bind(Ldtos);
3347     // doubles and longs are placed on stack in the same way, so
3348     // we can use push(ltos) to transfer value without using VFP
3349 #ifdef AARCH64
3350     __ ldr(R0_tos, Address(Robj, Roffset));
3351 #else
3352     __ add(Rtemp, Robj, Roffset);
3353     __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3354 #endif // AARCH64
3355     __ push(ltos);
3356     if (!is_static && rc == may_rewrite) {
3357       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3358     }
3359     __ b(Done);
3360   }
3361 
3362   // atos
3363   {
3364     assert(atos == seq++, "atos has unexpected value");
3365 
3366     // atos case for AArch64 and slow version on 32-bit ARM
3367     if(!atos_merged_with_itos) {
3368       __ bind(Latos);
3369       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3370       __ push(atos);
3371       // Rewrite bytecode to be faster
3372       if (!is_static && rc == may_rewrite) {
3373         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3374       }
3375       __ b(Done);
3376     }
3377   }
3378 
3379   assert(vtos == seq++, "vtos has unexpected value");
3380 
3381   __ bind(shouldNotReachHere);
3382   __ should_not_reach_here();
3383 
3384   // itos and atos cases are frequent, so it makes sense to move them out of the table switch
3385   // atos case can be merged with itos case (and thus moved out of the table switch) on 32-bit ARM, fast version only
3386 
3387   __ bind(Lint);
3388   __ ldr_s32(R0_tos, Address(Robj, Roffset));
3389   __ push(itos);
3390   // Rewrite bytecode to be faster
3391   if (!is_static && rc == may_rewrite) {
3392     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3393   }
3394 
3395   __ bind(Done);
3396 
3397   if (gen_volatile_check) {
3398     // Check for volatile field
3399     Label notVolatile;
3400     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3401 
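    // LoadLoad|LoadStore after the load provides the acquire ordering required
    // for a volatile read (per the JSR-133 cookbook barrier rules).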
3402     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3403 
3404     __ bind(notVolatile);
3405   }
3406 
3407 }
3408 
3409 void TemplateTable::getfield(int byte_no) {
3410   getfield_or_static(byte_no, false);
3411 }
3412 
3413 void TemplateTable::nofast_getfield(int byte_no) {
3414   getfield_or_static(byte_no, false, may_not_rewrite);
3415 }
3416 
3417 void TemplateTable::getstatic(int byte_no) {
3418   getfield_or_static(byte_no, true);
3419 }
3420 
3421 
3422 // The cache and index registers are expected to be set before the call, and should not be R1 or Rtemp.
3423 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3424 // except cache and index registers which are preserved.
3425 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3426   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3427   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3428 
3429   if (__ can_post_field_modification()) {
3430     // Check to see if a field modification watch has been set before we take
3431     // the time to call into the VM.
3432     Label Lcontinue;
3433 
3434     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3435     __ cbz(Rtemp, Lcontinue);
3436 
3437     if (is_static) {
3438       // Life is simple.  Null out the object pointer.
3439       __ mov(R1, 0);
3440     } else {
3441       // Life is harder. The stack holds the value on top, followed by the object.
3442       // We don't know the size of the value, though; it could be one or two words
3443       // depending on its type. As a result, we must find the type to determine where
3444       // the object is.
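      // The value is on top of the expression stack and occupies one or two slots;
      // the object reference sits just below it.  Hence the object is found at
      // expr_offset(1) for one-slot values and at expr_offset(2) for ltos/dtos.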
3445 
3446       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3447       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3448 
3449       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3450       // Make sure we don't need to mask Rtemp after the above shift
3451       ConstantPoolCacheEntry::verify_tos_state_shift();
3452 
3453       __ cmp(Rtemp, ltos);
3454       __ cond_cmp(Rtemp, dtos, ne);
3455 #ifdef AARCH64
3456       __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3457       __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3458       __ mov(R1, Rtemp, eq);
3459       __ ldr(R1, Address(Rstack_top, R1));
3460 #else
3461       // two word value (ltos/dtos)
3462       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3463 
3464       // one word value (not ltos, dtos)
3465       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3466 #endif // AARCH64
3467     }
3468 
3469     // cache entry pointer
3470     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3471     __ add(R2, R2, in_bytes(cp_base_offset));
3472 
3473     // object (tos)
3474     __ mov(R3, Rstack_top);
3475 
3476     // R1: object pointer set up above (NULL if static)
3477     // R2: cache entry pointer
3478     // R3: value object on the stack
3479     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3480                R1, R2, R3);
3481     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3482 
3483     __ bind(Lcontinue);
3484   }
3485 }
3486 
3487 
3488 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3489   transition(vtos, vtos);
3490 
3491   const Register Roffset  = R2_tmp;
3492   const Register Robj     = R3_tmp;
3493   const Register Rcache   = R4_tmp;
3494   const Register Rflagsav = Rtmp_save0;  // R4/R19
3495   const Register Rindex   = R5_tmp;
3496   const Register Rflags   = R5_tmp;
3497 
3498   const bool gen_volatile_check = os::is_MP();
3499 
3500   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3501   jvmti_post_field_mod(Rcache, Rindex, is_static);
3502   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3503 
3504   if (gen_volatile_check) {
3505     // Check for volatile field
3506     Label notVolatile;
3507     __ mov(Rflagsav, Rflags);
3508     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3509 
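    // StoreStore|LoadStore before the store provides the release ordering
    // required for a volatile write.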
3510     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3511 
3512     __ bind(notVolatile);
3513   }
3514 
3515   Label Done, Lint, shouldNotReachHere;
3516   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3517 
3518   // compute type
3519   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3520   // Make sure we don't need to mask flags after the above shift
3521   ConstantPoolCacheEntry::verify_tos_state_shift();
3522 
3523   // There are actually two versions of implementation of putfield/putstatic:
3524   //
3525   // 32-bit ARM:
3526   // 1) Table switch using add(PC,...) instruction (fast_version)
3527   // 2) Table switch using ldr(PC,...) instruction
3528   //
3529   // AArch64:
3530   // 1) Table switch using adr/add/br instructions (fast_version)
3531   // 2) Table switch using adr/ldr/br instructions
3532   //
3533   // The first version requires a fixed-size code block for each case and
3534   // cannot be used when bytecode rewriting (RewriteBytecodes) or VerifyOops
3535   // is enabled.
3536 
3537   // Size of fixed size code block for fast_version (in instructions)
3538   const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
3539   const int max_block_size = 1 << log_max_block_size;
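  // As in getfield_or_static above, the fast version pads each case below to
  // max_block_size instructions so the table switch can jump directly to
  // Lbtos + tos_state * (max_block_size * instruction size).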
3540 
3541   // Decide if fast version is enabled
3542   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;
3543 
3544   assert(number_of_states == 10, "number of tos states should be equal to 10");
3545 
3546   // itos case is frequent and is moved outside the table switch
3547   __ cmp(Rflags, itos);
3548 
3549 #ifdef AARCH64
3550   __ b(Lint, eq);
3551 
3552   if (fast_version) {
3553     __ adr(Rtemp, Lbtos);
3554     __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3555     __ br(Rtemp);
3556   } else {
3557     __ adr(Rtemp, Ltable);
3558     __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3559     __ br(Rtemp);
3560   }
3561 #else
3562   // table switch by type
3563   if (fast_version) {
3564     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3565   } else  {
3566     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3567   }
3568 
3569   // jump to itos case
3570   __ b(Lint);
3571 #endif // AARCH64
3572 
3573   // table with addresses for slow version
3574   if (fast_version) {
3575     // nothing to do
3576   } else  {
3577     AARCH64_ONLY(__ align(wordSize));
3578     __ bind(Ltable);
3579     __ emit_address(Lbtos);
3580     __ emit_address(Lztos);
3581     __ emit_address(Lctos);
3582     __ emit_address(Lstos);
3583     __ emit_address(Litos);
3584     __ emit_address(Lltos);
3585     __ emit_address(Lftos);
3586     __ emit_address(Ldtos);
3587     __ emit_address(Latos);
3588   }
3589 
3590 #ifdef ASSERT
3591   int seq = 0;
3592 #endif
3593   // btos
3594   {
3595     assert(btos == seq++, "btos has unexpected value");
3596     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3597     __ bind(Lbtos);
3598     __ pop(btos);
3599     if (!is_static) pop_and_check_object(Robj);
3600     __ strb(R0_tos, Address(Robj, Roffset));
3601     if (!is_static && rc == may_rewrite) {
3602       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3603     }
3604     __ b(Done);
3605   }
3606 
3607   // ztos
3608   {
3609     assert(ztos == seq++, "ztos has unexpected value");
3610     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3611     __ bind(Lztos);
3612     __ pop(ztos);
3613     if (!is_static) pop_and_check_object(Robj);
3614     __ and_32(R0_tos, R0_tos, 1);
3615     __ strb(R0_tos, Address(Robj, Roffset));
3616     if (!is_static && rc == may_rewrite) {
3617       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3618     }
3619     __ b(Done);
3620   }
3621 
3622   // ctos
3623   {
3624     assert(ctos == seq++, "ctos has unexpected value");
3625     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3626     __ bind(Lctos);
3627     __ pop(ctos);
3628     if (!is_static) pop_and_check_object(Robj);
3629     __ strh(R0_tos, Address(Robj, Roffset));
3630     if (!is_static && rc == may_rewrite) {
3631       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3632     }
3633     __ b(Done);
3634   }
3635 
3636   // stos
3637   {
3638     assert(stos == seq++, "stos has unexpected value");
3639     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3640     __ bind(Lstos);
3641     __ pop(stos);
3642     if (!is_static) pop_and_check_object(Robj);
3643     __ strh(R0_tos, Address(Robj, Roffset));
3644     if (!is_static && rc == may_rewrite) {
3645       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3646     }
3647     __ b(Done);
3648   }
3649 
3650   // itos
3651   {
3652     assert(itos == seq++, "itos has unexpected value");
3653     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3654     __ bind(Litos);
3655     __ b(shouldNotReachHere);
3656   }
3657 
3658   // ltos
3659   {
3660     assert(ltos == seq++, "ltos has unexpected value");
3661     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3662     __ bind(Lltos);
3663     __ pop(ltos);
3664     if (!is_static) pop_and_check_object(Robj);
3665 #ifdef AARCH64
3666     __ str(R0_tos, Address(Robj, Roffset));
3667 #else
3668     __ add(Roffset, Robj, Roffset);
3669     __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3670 #endif // AARCH64
3671     if (!is_static && rc == may_rewrite) {
3672       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3673     }
3674     __ b(Done);
3675   }
3676 
3677   // ftos
3678   {
3679     assert(ftos == seq++, "ftos has unexpected value");
3680     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3681     __ bind(Lftos);
3682     // floats and ints are placed on stack in the same way, so
3683     // we can use pop(itos) to transfer value without using VFP
3684     __ pop(itos);
3685     if (!is_static) pop_and_check_object(Robj);
3686     __ str_32(R0_tos, Address(Robj, Roffset));
3687     if (!is_static && rc == may_rewrite) {
3688       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3689     }
3690     __ b(Done);
3691   }
3692 
3693   // dtos
3694   {
3695     assert(dtos == seq++, "dtos has unexpected value");
3696     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3697     __ bind(Ldtos);
3698     // doubles and longs are placed on stack in the same way, so
3699     // we can use pop(ltos) to transfer value without using VFP
3700     __ pop(ltos);
3701     if (!is_static) pop_and_check_object(Robj);
3702 #ifdef AARCH64
3703     __ str(R0_tos, Address(Robj, Roffset));
3704 #else
3705     __ add(Rtemp, Robj, Roffset);
3706     __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3707 #endif // AARCH64
3708     if (!is_static && rc == may_rewrite) {
3709       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3710     }
3711     __ b(Done);
3712   }
3713 
3714   // atos
3715   {
3716     assert(atos == seq++, "atos has unexpected value");
3717     __ bind(Latos);
3718     __ pop(atos);
3719     if (!is_static) pop_and_check_object(Robj);
3720     // Store into the field
3721     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3722     if (!is_static && rc == may_rewrite) {
3723       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3724     }
3725     __ b(Done);
3726   }
3727 
3728   __ bind(shouldNotReachHere);
3729   __ should_not_reach_here();
3730 
3731   // itos case is frequent and is moved outside the table switch
3732   __ bind(Lint);
3733   __ pop(itos);
3734   if (!is_static) pop_and_check_object(Robj);
3735   __ str_32(R0_tos, Address(Robj, Roffset));
3736   if (!is_static && rc == may_rewrite) {
3737     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3738   }
3739 
3740   __ bind(Done);
3741 
3742   if (gen_volatile_check) {
3743     Label notVolatile;
3744     if (is_static) {
3745       // Just check for volatile. Memory barrier for static final field
3746       // is handled by class initialization.
3747       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3748       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3749       __ bind(notVolatile);
3750     } else {
3751       // Check for volatile field and final field
3752       Label skipMembar;
3753 
3754       __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3755                        1 << ConstantPoolCacheEntry::is_final_shift);
3756       __ b(skipMembar, eq);
3757 
3758       __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3759 
3760       // StoreLoad barrier after volatile field write
3761       volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3762       __ b(skipMembar);
3763 
3764       // StoreStore barrier after final field write
3765       __ bind(notVolatile);
3766       volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3767 
3768       __ bind(skipMembar);
3769     }
3770   }
3771 
3772 }
3773 
3774 void TemplateTable::putfield(int byte_no) {
3775   putfield_or_static(byte_no, false);
3776 }
3777 
3778 void TemplateTable::nofast_putfield(int byte_no) {
3779   putfield_or_static(byte_no, false, may_not_rewrite);
3780 }
3781 
3782 void TemplateTable::putstatic(int byte_no) {
3783   putfield_or_static(byte_no, true);
3784 }
3785 
3786 
3787 void TemplateTable::jvmti_post_fast_field_mod() {
3788   // This version of jvmti_post_fast_field_mod() is not used on ARM
3789   Unimplemented();
3790 }
3791 
3792 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3793 // but preserves tosca with the given state.
3794 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3795   if (__ can_post_field_modification()) {
3796     // Check to see if a field modification watch has been set before we take
3797     // the time to call into the VM.
3798     Label done;
3799 
3800     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3801     __ cbz(R2, done);
3802 
3803     __ pop_ptr(R3);               // copy the object pointer from tos
3804     __ verify_oop(R3);
3805     __ push_ptr(R3);              // put the object pointer back on tos
3806 
3807     __ push(state);               // save value on the stack
3808 
3809     // access constant pool cache entry
3810     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3811 
3812     __ mov(R1, R3);
3813     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3814     __ mov(R3, Rstack_top); // put tos addr into R3
3815 
3816     // R1: object pointer copied above
3817     // R2: cache entry pointer
3818     // R3: jvalue object on the stack
3819     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3820 
3821     __ pop(state);                // restore value
3822 
3823     __ bind(done);
3824   }
3825 }
3826 
3827 
3828 void TemplateTable::fast_storefield(TosState state) {
3829   transition(state, vtos);
3830 
3831   ByteSize base = ConstantPoolCache::base_offset();
3832 
3833   jvmti_post_fast_field_mod(state);
3834 
3835   const Register Rcache  = R2_tmp;
3836   const Register Rindex  = R3_tmp;
3837   const Register Roffset = R3_tmp;
3838   const Register Rflags  = Rtmp_save0; // R4/R19
3839   const Register Robj    = R5_tmp;
3840 
3841   const bool gen_volatile_check = os::is_MP();
3842 
3843   // access constant pool cache
3844   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3845 
3846   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
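  // Rcache adjusted by the scaled index is used below to load the entry's flags
  // and field offset (relative to ConstantPoolCache::base_offset()).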
3847 
3848   if (gen_volatile_check) {
3849     // load flags to test volatile
3850     __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3851   }
3852 
3853   // replace index with field offset from cache entry
3854   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3855 
3856   if (gen_volatile_check) {
3857     // Check for volatile store
3858     Label notVolatile;
3859     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3860 
3861     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
3862     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3863 
3864     __ bind(notVolatile);
3865   }
3866 
3867   // Get object from stack
3868   pop_and_check_object(Robj);
3869 
3870   // access field
3871   switch (bytecode()) {
3872     case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
3873                                      // fall through
3874     case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
3875     case Bytecodes::_fast_sputfield: // fall through
3876     case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
3877     case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
3878 #ifdef AARCH64
3879     case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
3880     case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
3881     case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
3882 #else
3883     case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
3884                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3885 
3886 #ifdef __SOFTFP__
3887     case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
3888     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3889                                      __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3890 #else
3891     case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
3892                                      __ fsts(S0_tos, Address(Robj));          break;
3893     case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
3894                                      __ fstd(D0_tos, Address(Robj));          break;
3895 #endif // __SOFTFP__
3896 #endif // AARCH64
3897 
3898     case Bytecodes::_fast_aputfield:
3899       do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3900       break;
3901 
3902     default:
3903       ShouldNotReachHere();
3904   }
3905 
3906   if (gen_volatile_check) {
3907     Label notVolatile;
3908     Label skipMembar;
3909     __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3910                    1 << ConstantPoolCacheEntry::is_final_shift);
3911     __ b(skipMembar, eq);
3912 
3913     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3914 
3915     // StoreLoad barrier after volatile field write
3916     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3917     __ b(skipMembar);
3918 
3919     // StoreStore barrier after final field write
3920     __ bind(notVolatile);
3921     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3922 
3923     __ bind(skipMembar);
3924   }
3925 }
3926 
3927 
3928 void TemplateTable::fast_accessfield(TosState state) {
3929   transition(atos, state);
3930 
3931   // do the JVMTI work here to avoid disturbing the register state below
3932   if (__ can_post_field_access()) {
3933     // Check to see if a field access watch has been set before we take
3934     // the time to call into the VM.
3935     Label done;
3936     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3937     __ cbz(R2, done);
3938     // access constant pool cache entry
3939     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3940     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3941     __ verify_oop(R0_tos);
3942     __ mov(R1, R0_tos);
3943     // R1: object pointer copied above
3944     // R2: cache entry pointer
3945     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3946     __ pop_ptr(R0_tos);   // restore object pointer
3947 
3948     __ bind(done);
3949   }
3950 
3951   const Register Robj    = R0_tos;
3952   const Register Rcache  = R2_tmp;
3953   const Register Rflags  = R2_tmp;
3954   const Register Rindex  = R3_tmp;
3955   const Register Roffset = R3_tmp;
3956 
3957   const bool gen_volatile_check = os::is_MP();
3958 
3959   // access constant pool cache
3960   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3961   // replace index with field offset from cache entry
3962   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3963   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3964 
3965   if (gen_volatile_check) {
3966     // load flags to test volatile
3967     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3968   }
3969 
3970   __ verify_oop(Robj);
3971   __ null_check(Robj, Rtemp);
3972 
3973   // access field
3974   switch (bytecode()) {
3975     case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
3976     case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
3977     case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
3978     case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
3979 #ifdef AARCH64
3980     case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3981     case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
3982     case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
3983 #else
3984     case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
3985                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3986 #ifdef __SOFTFP__
3987     case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
3988     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
3989                                      __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
3990 #else
3991     case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
3992     case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
3993 #endif // __SOFTFP__
3994 #endif // AARCH64
3995     case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
3996     default:
3997       ShouldNotReachHere();
3998   }
3999 
4000   if (gen_volatile_check) {
4001     // Check for volatile load
4002     Label notVolatile;
4003     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4004 
4005     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
4006     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4007 
4008     __ bind(notVolatile);
4009   }
4010 }
4011 
4012 
4013 void TemplateTable::fast_xaccess(TosState state) {
4014   transition(vtos, state);
4015 
4016   const Register Robj = R1_tmp;
4017   const Register Rcache = R2_tmp;
4018   const Register Rindex = R3_tmp;
4019   const Register Roffset = R3_tmp;
4020   const Register Rflags = R4_tmp;
4021   Label done;
4022 
4023   // get receiver
4024   __ ldr(Robj, aaddress(0));
4025 
4026   // access constant pool cache
4027   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
4028   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
4029   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
4030 
4031   const bool gen_volatile_check = os::is_MP();
4032 
4033   if (gen_volatile_check) {
4034     // load flags to test volatile
4035     __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
4036   }
4037 
4038   // make sure exception is reported in correct bcp range (getfield is next instruction)
4039   __ add(Rbcp, Rbcp, 1);
4040   __ null_check(Robj, Rtemp);
4041   __ sub(Rbcp, Rbcp, 1);
4042 
4043 #ifdef AARCH64
4044   if (gen_volatile_check) {
4045     Label notVolatile;
4046     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4047 
4048     __ add(Rtemp, Robj, Roffset);
4049 
4050     if (state == itos) {
4051       __ ldar_w(R0_tos, Rtemp);
4052     } else if (state == atos) {
4053       if (UseCompressedOops) {
4054         __ ldar_w(R0_tos, Rtemp);
4055         __ decode_heap_oop(R0_tos);
4056       } else {
4057         __ ldar(R0_tos, Rtemp);
4058       }
4059       __ verify_oop(R0_tos);
4060     } else if (state == ftos) {
4061       __ ldar_w(R0_tos, Rtemp);
4062       __ fmov_sw(S0_tos, R0_tos);
4063     } else {
4064       ShouldNotReachHere();
4065     }
4066     __ b(done);
4067 
4068     __ bind(notVolatile);
4069   }
4070 #endif // AARCH64
4071 
4072   if (state == itos) {
4073     __ ldr_s32(R0_tos, Address(Robj, Roffset));
4074   } else if (state == atos) {
4075     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
4076     __ verify_oop(R0_tos);
4077   } else if (state == ftos) {
4078 #ifdef AARCH64
4079     __ ldr_s(S0_tos, Address(Robj, Roffset));
4080 #else
4081 #ifdef __SOFTFP__
4082     __ ldr(R0_tos, Address(Robj, Roffset));
4083 #else
4084     __ add(Roffset, Robj, Roffset);
4085     __ flds(S0_tos, Address(Roffset));
4086 #endif // __SOFTFP__
4087 #endif // AARCH64
4088   } else {
4089     ShouldNotReachHere();
4090   }
4091 
4092 #ifndef AARCH64
4093   if (gen_volatile_check) {
4094     // Check for volatile load
4095     Label notVolatile;
4096     __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
4097 
4098     volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
4099 
4100     __ bind(notVolatile);
4101   }
4102 #endif // !AARCH64
4103 
4104   __ bind(done);
4105 }
4106 
4107 
4108 
4109 //----------------------------------------------------------------------------------------------------
4110 // Calls
4111 
4112 void TemplateTable::count_calls(Register method, Register temp) {
4113   // implemented elsewhere
4114   ShouldNotReachHere();
4115 }
4116 
4117 
4118 void TemplateTable::prepare_invoke(int byte_no,
4119                                    Register method,  // linked method (or i-klass)
4120                                    Register index,   // itable index, MethodType, etc.
4121                                    Register recv,    // if caller wants to see it
4122                                    Register flags    // if caller wants to test it
4123                                    ) {
4124   // determine flags
4125   const Bytecodes::Code code = bytecode();
4126   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
4127   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
4128   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
4129   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
4130   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
4131   const bool load_receiver       = (recv != noreg);
4132   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
4133   assert(recv  == noreg || recv  == R2, "");
4134   assert(flags == noreg || flags == R3, "");
4135 
4136   // setup registers & access constant pool cache
4137   if (recv  == noreg)  recv  = R2;
4138   if (flags == noreg)  flags = R3;
4139   const Register temp = Rtemp;
4140   const Register ret_type = R1_tmp;
4141   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
4142 
4143   // save 'interpreter return address'
4144   __ save_bcp();
4145 
4146   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
4147 
4148   // maybe push extra argument
4149   if (is_invokedynamic || is_invokehandle) {
4150     Label L_no_push;
4151     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
4152     __ mov(temp, index);
4153     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4154     __ load_resolved_reference_at_index(index, temp);
4155     __ verify_oop(index);
4156     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
4157     __ bind(L_no_push);
4158   }
4159 
4160   // load receiver if needed (after extra argument is pushed so parameter size is correct)
4161   if (load_receiver) {
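    // The receiver is the argument farthest from the stack top; its address is
    // computed from the parameter size encoded in flags (masked out below).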
4162     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
4163     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
4164     __ ldr(recv, recv_addr);
4165     __ verify_oop(recv);
4166   }
4167 
4168   // compute return type
4169   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
4170   // Make sure we don't need to mask flags after the above shift
4171   ConstantPoolCacheEntry::verify_tos_state_shift();
4172   // load return address
4173   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
4174     __ mov_slow(temp, table);
4175     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
4176   }
4177 }
4178 
4179 
4180 void TemplateTable::invokevirtual_helper(Register index,
4181                                          Register recv,
4182                                          Register flags) {
4183 
4184   const Register recv_klass = R2_tmp;
4185 
4186   assert_different_registers(index, recv, flags, Rtemp);
4187   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
4188 
4189   // Test for an invoke of a final method
4190   Label notFinal;
4191   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
4192 
4193   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
4194 
4195   // do the call - the index is actually the method to call
4196 
4197   // It's final, need a null check here!
4198   __ null_check(recv, Rtemp);
4199 
4200   // profile this call
4201   __ profile_final_call(R0_tmp);
4202 
4203   __ jump_from_interpreted(Rmethod);
4204 
4205   __ bind(notFinal);
4206 
4207   // get receiver klass
4208   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
4209   __ load_klass(recv_klass, recv);
4210 
4211   // profile this call
4212   __ profile_virtual_call(R0_tmp, recv_klass);
4213 
4214   // get target Method* & entry point
4215   const int base = in_bytes(Klass::vtable_start_offset());
4216   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
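  // The vtable entry for this index is one word (as asserted above); its Method* is
  // loaded from recv_klass + index * wordSize + vtable_start_offset + method_offset.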
4217   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
4218   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
4219   __ jump_from_interpreted(Rmethod);
4220 }
4221 
4222 void TemplateTable::invokevirtual(int byte_no) {
4223   transition(vtos, vtos);
4224   assert(byte_no == f2_byte, "use this argument");
4225 
4226   const Register Rrecv  = R2_tmp;
4227   const Register Rflags = R3_tmp;
4228 
4229   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
4230 
4231   // Rmethod: index
4232   // Rrecv:   receiver
4233   // Rflags:  flags
4234   // LR:      return address
4235 
4236   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4237 }
4238 
4239 
4240 void TemplateTable::invokespecial(int byte_no) {
4241   transition(vtos, vtos);
4242   assert(byte_no == f1_byte, "use this argument");
4243   const Register Rrecv  = R2_tmp;
4244   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
4245   __ verify_oop(Rrecv);
4246   __ null_check(Rrecv, Rtemp);
4247   // do the call
4248   __ profile_call(Rrecv);
4249   __ jump_from_interpreted(Rmethod);
4250 }
4251 
4252 
4253 void TemplateTable::invokestatic(int byte_no) {
4254   transition(vtos, vtos);
4255   assert(byte_no == f1_byte, "use this argument");
4256   prepare_invoke(byte_no, Rmethod);
4257   // do the call
4258   __ profile_call(R2_tmp);
4259   __ jump_from_interpreted(Rmethod);
4260 }
4261 
4262 
4263 void TemplateTable::fast_invokevfinal(int byte_no) {
4264   transition(vtos, vtos);
4265   assert(byte_no == f2_byte, "use this argument");
4266   __ stop("fast_invokevfinal is not used on ARM");
4267 }
4268 
4269 
4270 void TemplateTable::invokeinterface(int byte_no) {
4271   transition(vtos, vtos);
4272   assert(byte_no == f1_byte, "use this argument");
4273 
4274   const Register Ritable = R1_tmp;
4275   const Register Rrecv   = R2_tmp;
4276   const Register Rinterf = R5_tmp;
4277   const Register Rindex  = R4_tmp;
4278   const Register Rflags  = R3_tmp;
4279   const Register Rklass  = R2_tmp; // Note! Same register with Rrecv
4280 
4281   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
4282 
4283   // First check for Object case, then private interface method,
4284   // then regular interface method.
4285 
4286   // Special case of invokeinterface called for virtual method of
4287   // java.lang.Object.  See cpCache.cpp for details.
4288   Label notObjectMethod;
4289   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
4290   invokevirtual_helper(Rmethod, Rrecv, Rflags);
4291   __ bind(notObjectMethod);
4292 
4293   // Get receiver klass into Rklass - also a null check
4294   __ load_klass(Rklass, Rrecv);
4295 
4296   // Check for private method invocation - indicated by vfinal
4297   Label no_such_interface;
4298 
4299   Label notVFinal;
4300   __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
4301 
4302   Label subtype;
4303   __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
4304   // If we get here the typecheck failed
4305   __ b(no_such_interface);
4306   __ bind(subtype);
4307 
4308   // do the call
4309   __ profile_final_call(R0_tmp);
4310   __ jump_from_interpreted(Rmethod);
4311 
4312   __ bind(notVFinal);
4313 
4314   // Receiver subtype check against REFC.
4315   __ lookup_interface_method(// inputs: rec. class, interface
4316                              Rklass, Rinterf, noreg,
4317                              // outputs:  scan temp. reg1, scan temp. reg2
4318                              noreg, Ritable, Rtemp,
4319                              no_such_interface);
4320 
4321   // profile this call
4322   __ profile_virtual_call(R0_tmp, Rklass);
4323 
4324   // Get declaring interface class from method
4325   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
4326   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
4327   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
4328 
4329   // Get itable index from method
4330   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
4331   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // the negative constant is too large to encode as an arm32 immediate
4332   __ neg(Rindex, Rtemp);
4333 
4334   __ lookup_interface_method(// inputs: rec. class, interface
4335                              Rklass, Rinterf, Rindex,
4336                              // outputs:  scan temp. reg1, scan temp. reg2
4337                              Rmethod, Ritable, Rtemp,
4338                              no_such_interface);
4339 
4340   // Rmethod: Method* to call
4341 
4342   // Check for abstract method error
4343   // Note: This should be done more efficiently via a throw_abstract_method_error
4344   //       interpreter entry point and a conditional jump to it in case of a null
4345   //       method.
4346   { Label L;
4347     __ cbnz(Rmethod, L);
4348     // throw exception
4349     // note: must restore interpreter registers to canonical
4350     //       state for exception handling to work correctly!
4351     __ restore_method();
4352     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
4353     // the call_VM checks for exception, so we should never return here.
4354     __ should_not_reach_here();
4355     __ bind(L);
4356   }
4357 
4358   // do the call
4359   __ jump_from_interpreted(Rmethod);
4360 
4361   // throw exception
4362   __ bind(no_such_interface);
4363   __ restore_method();
4364   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
4365   // the call_VM checks for exception, so we should never return here.
4366   __ should_not_reach_here();
4367 }
4368 
4369 void TemplateTable::invokehandle(int byte_no) {
4370   transition(vtos, vtos);
4371 
4372   // TODO-AARCH64 review register usage
4373   const Register Rrecv  = R2_tmp;
4374   const Register Rmtype = R4_tmp;
4375   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4376 
4377   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
4378   __ null_check(Rrecv, Rtemp);
4379 
4380   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
4381   // Rmethod: MH.invokeExact_MT method (from f2)
4382 
4383   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
4384 
4385   // do the call
4386   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
4387   __ mov(Rmethod, R5_method);
4388   __ jump_from_interpreted(Rmethod);
4389 }
4390 
4391 void TemplateTable::invokedynamic(int byte_no) {
4392   transition(vtos, vtos);
4393 
4394   // TODO-AARCH64 review register usage
4395   const Register Rcallsite = R4_tmp;
4396   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
4397 
4398   prepare_invoke(byte_no, R5_method, Rcallsite);
4399 
4400   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
4401   // Rmethod:   MH.linkToCallSite method (from f2)
4402 
4403   // Note:  Rcallsite is already pushed by prepare_invoke
4404 
4405   if (ProfileInterpreter) {
4406     __ profile_call(R2_tmp);
4407   }
4408 
4409   // do the call
4410   __ mov(Rmethod, R5_method);
4411   __ jump_from_interpreted(Rmethod);
4412 }
4413 
4414 //----------------------------------------------------------------------------------------------------
4415 // Allocation
4416 
4417 void TemplateTable::_new() {
4418   transition(vtos, atos);
4419 
4420   const Register Robj   = R0_tos;
4421   const Register Rcpool = R1_tmp;
4422   const Register Rindex = R2_tmp;
4423   const Register Rtags  = R3_tmp;
4424   const Register Rsize  = R3_tmp;
4425 
4426   Register Rklass = R4_tmp;
4427   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
4428   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
4429 
4430   Label slow_case;
4431   Label done;
4432   Label initialize_header;
4433   Label initialize_object;  // including clearing the fields
4434 
4435   const bool allow_shared_alloc =
4436     Universe::heap()->supports_inline_contig_alloc();
4437 
4438   // Literals
4439   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
4440 
4441   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4442   __ get_cpool_and_tags(Rcpool, Rtags);
4443 
4444   // Make sure the class we're about to instantiate has been resolved.
4445   // This is done before loading the InstanceKlass to be consistent with the order
4446   // in which the Constant Pool is updated (see ConstantPool::klass_at_put)
4447   const int tags_offset = Array<u1>::base_offset_in_bytes();
4448   __ add(Rtemp, Rtags, Rindex);
4449 
4450 #ifdef AARCH64
4451   __ add(Rtemp, Rtemp, tags_offset);
4452   __ ldarb(Rtemp, Rtemp);
4453 #else
4454   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
4455 
4456   // use Rklass as a scratch
4457   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
4458 #endif // AARCH64
4459 
4460   // get InstanceKlass
4461   __ cmp(Rtemp, JVM_CONSTANT_Class);
4462   __ b(slow_case, ne);
4463   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
4464 
4465   // make sure klass is initialized & doesn't have finalizer
4466   // make sure klass is fully initialized
4467   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
4468   __ cmp(Rtemp, InstanceKlass::fully_initialized);
4469   __ b(slow_case, ne);
4470 
4471   // get instance_size in InstanceKlass (scaled to a count of bytes)
4472   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
4473 
4474   // test to see if it has a finalizer or is malformed in some way
4475   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
4476   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
4477 
4478   // Allocate the instance:
4479   //  If TLAB is enabled:
4480   //    Try to allocate in the TLAB.
4481   //    If fails, go to the slow path.
4482   //  Else If inline contiguous allocations are enabled:
4483   //    Try to allocate in eden.
4484   //    If fails due to heap end, go to slow path.
4485   //
4486   //  If TLAB is enabled OR inline contiguous is enabled:
4487   //    Initialize the allocation.
4488   //    Exit.
4489   //
4490   //  Go to slow path.
4491   if (UseTLAB) {
4492     const Register Rtlab_top = R1_tmp;
4493     const Register Rtlab_end = R2_tmp;
4494     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
4495 
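    // Bump-the-pointer allocation in the TLAB: reserve [Robj, Robj + Rsize) by
    // advancing tlab_top; if the new top would pass tlab_end, take the slow path.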
4496     __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
4497     __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
4498     __ add(Rtlab_top, Robj, Rsize);
4499     __ cmp(Rtlab_top, Rtlab_end);
4500     __ b(slow_case, hi);
4501     __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
4502     if (ZeroTLAB) {
4503       // the fields have been already cleared
4504       __ b(initialize_header);
4505     } else {
4506       // initialize both the header and fields
4507       __ b(initialize_object);
4508     }
4509   } else {
4510     // Allocation in the shared Eden, if allowed.
4511     if (allow_shared_alloc) {
4512       const Register Rheap_top_addr = R2_tmp;
4513       const Register Rheap_top = R5_tmp;
4514       const Register Rheap_end = Rtemp;
4515       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4516 
4517       // Rheap_end is (re)loaded inside the loop since it is also used as a scratch register in the CAS
4518       __ ldr_literal(Rheap_top_addr, Lheap_top_addr);
4519 
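      // Atomically reserve [Robj, Robj + Rsize) in shared eden: reload the heap top,
      // check the new top against the heap end, and retry if another thread advanced
      // the top first (store-exclusive/CAS failure).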
4520       Label retry;
4521       __ bind(retry);
4522 
4523 #ifdef AARCH64
4524       __ ldxr(Robj, Rheap_top_addr);
4525 #else
4526       __ ldr(Robj, Address(Rheap_top_addr));
4527 #endif // AARCH64
4528 
4529       __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
4530       __ add(Rheap_top, Robj, Rsize);
4531       __ cmp(Rheap_top, Rheap_end);
4532       __ b(slow_case, hi);
4533 
4534       // Update heap top atomically.
4535       // If someone beats us on the allocation, try again, otherwise continue.
4536 #ifdef AARCH64
4537       __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
4538       __ cbnz_w(Rtemp2, retry);
4539 #else
4540       __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
4541       __ b(retry, ne);
4542 #endif // AARCH64
4543 
4544       __ incr_allocated_bytes(Rsize, Rtemp);
4545     }
4546   }
4547 
4548   if (UseTLAB || allow_shared_alloc) {
4549     const Register Rzero0 = R1_tmp;
4550     const Register Rzero1 = R2_tmp;
4551     const Register Rzero_end = R5_tmp;
4552     const Register Rzero_cur = Rtemp;
4553     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4554 
4555     // The object fields are initialized before the header.  If there are no fields
4556     // to clear (instance size equals the header size), go directly to the header initialization.
4557     __ bind(initialize_object);
4558     __ subs(Rsize, Rsize, sizeof(oopDesc));
4559     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4560     __ b(initialize_header, eq);
4561 
4562 #ifdef ASSERT
4563     // make sure Rsize is a multiple of 8
4564     Label L;
4565     __ tst(Rsize, 0x07);
4566     __ b(L, eq);
4567     __ stop("object size is not multiple of 8 - adjust this code");
4568     __ bind(L);
4569 #endif
4570 
4571 #ifdef AARCH64
4572     {
4573       Label loop;
4574       // Step back by 1 word if object size is not a multiple of 2*wordSize.
4575       assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
4576       __ andr(Rtemp2, Rsize, (uintx)wordSize);
4577       __ sub(Rzero_cur, Rzero_cur, Rtemp2);
4578 
4579       // Zero by 2 words per iteration.
4580       __ bind(loop);
4581       __ subs(Rsize, Rsize, 2*wordSize);
4582       __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
4583       __ b(loop, gt);
4584     }
4585 #else
4586     __ mov(Rzero0, 0);
4587     __ mov(Rzero1, 0);
4588     __ add(Rzero_end, Rzero_cur, Rsize);
4589 
4590     // initialize remaining object fields: Rsize was a multiple of 8
4591     { Label loop;
4592       // loop is unrolled 2 times
4593       __ bind(loop);
4594       // #1
4595       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4596       __ cmp(Rzero_cur, Rzero_end);
4597       // #2
4598       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4599       __ cmp(Rzero_cur, Rzero_end, ne);
4600       __ b(loop, ne);
4601     }
4602 #endif // AARCH64
4603 
4604     // initialize object header only.
4605     __ bind(initialize_header);
4606     if (UseBiasedLocking) {
4607       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4608     } else {
4609       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4610     }
4611     // mark
4612     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4613 
4614     // klass
4615 #ifdef AARCH64
4616     __ store_klass_gap(Robj);
4617 #endif // AARCH64
4618     __ store_klass(Rklass, Robj); // blows Rklass:
4619     Rklass = noreg;
4620 
4621     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4622     if (DTraceAllocProbes) {
4623       // Trigger dtrace event for fastpath
4624       Label Lcontinue;
4625 
4626       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4627       __ cbz(Rtemp, Lcontinue);
4628 
4629       __ push(atos);
4630       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4631       __ pop(atos);
4632 
4633       __ bind(Lcontinue);
4634     }
4635 
4636     __ b(done);
4637   } else {
4638     // jump over literals
4639     __ b(slow_case);
4640   }
4641 
4642   if (allow_shared_alloc) {
4643     __ bind_literal(Lheap_top_addr);
4644   }
4645 
4646   // slow case
4647   __ bind(slow_case);
4648   __ get_constant_pool(Rcpool);
4649   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4650   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4651 
4652   // continue
4653   __ bind(done);
4654 
4655   // StoreStore barrier required after complete initialization
4656   // (headers + content zeroing), before the object may escape.
4657   __ membar(MacroAssembler::StoreStore, R1_tmp);
4658 }
4659 
4660 
4661 void TemplateTable::newarray() {
4662   transition(itos, atos);
4663   __ ldrb(R1, at_bcp(1));
4664   __ mov(R2, R0_tos);
4665   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4666   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4667 }
4668 
4669 
4670 void TemplateTable::anewarray() {
4671   transition(itos, atos);
4672   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4673   __ get_constant_pool(R1);
4674   __ mov(R3, R0_tos);
4675   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4676   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4677 }
4678 
4679 
4680 void TemplateTable::arraylength() {
4681   transition(atos, itos);
4682   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4683   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4684 }
4685 
4686 
4687 void TemplateTable::checkcast() {
4688   transition(atos, atos);
4689   Label done, is_null, quicked, resolved, throw_exception;
4690 
4691   const Register Robj = R0_tos;
4692   const Register Rcpool = R2_tmp;
4693   const Register Rtags = R3_tmp;
4694   const Register Rindex = R4_tmp;
4695   const Register Rsuper = R3_tmp;
4696   const Register Rsub   = R4_tmp;
4697   const Register Rsubtype_check_tmp1 = R1_tmp;
4698   const Register Rsubtype_check_tmp2 = LR_tmp;
4699 
4700   __ cbz(Robj, is_null);
4701 
4702   // Get cpool & tags index
4703   __ get_cpool_and_tags(Rcpool, Rtags);
4704   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4705 
4706   // See if bytecode has already been quicked
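  // A quickened entry has its constant pool tag already set to JVM_CONSTANT_Class,
  // so the resolved klass can be loaded directly without calling into the VM.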
4707   __ add(Rtemp, Rtags, Rindex);
4708 #ifdef AARCH64
4709   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4710   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4711   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4712 #else
4713   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4714 #endif // AARCH64
4715 
4716   __ cmp(Rtemp, JVM_CONSTANT_Class);
4717 
4718 #ifndef AARCH64
4719   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4720 #endif // !AARCH64
4721 
4722   __ b(quicked, eq);
4723 
4724   __ push(atos);
4725   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4726   // vm_result_2 has metadata result
4727   __ get_vm_result_2(Rsuper, Robj);
4728   __ pop_ptr(Robj);
4729   __ b(resolved);
4730 
4731   __ bind(throw_exception);
4732   // Come here on failure of subtype check
4733   __ profile_typecheck_failed(R1_tmp);
4734   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4735   __ b(Interpreter::_throw_ClassCastException_entry);
4736 
4737   // Get superklass in Rsuper and subklass in Rsub
4738   __ bind(quicked);
4739   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4740 
4741   __ bind(resolved);
4742   __ load_klass(Rsub, Robj);
4743 
4744   // Generate subtype check. Blows both tmps and Rtemp.
4745   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4746   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4747 
4748   // Come here on success
4749 
4750   // Collect counts on whether this check-cast sees NULLs a lot or not.
4751   if (ProfileInterpreter) {
4752     __ b(done);
4753     __ bind(is_null);
4754     __ profile_null_seen(R1_tmp);
4755   } else {
4756     __ bind(is_null);   // same as 'done'
4757   }
4758   __ bind(done);
4759 }
4760 
4761 
4762 void TemplateTable::instanceof() {
4763   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4764   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4765 
4766   transition(atos, itos);
4767   Label done, is_null, not_subtype, quicked, resolved;
4768 
4769   const Register Robj = R0_tos;
4770   const Register Rcpool = R2_tmp;
4771   const Register Rtags = R3_tmp;
4772   const Register Rindex = R4_tmp;
4773   const Register Rsuper = R3_tmp;
4774   const Register Rsub   = R4_tmp;
4775   const Register Rsubtype_check_tmp1 = R0_tmp;
4776   const Register Rsubtype_check_tmp2 = R1_tmp;
4777 
4778   __ cbz(Robj, is_null);
4779 
4780   __ load_klass(Rsub, Robj);
4781 
4782   // Get cpool & tags index
4783   __ get_cpool_and_tags(Rcpool, Rtags);
4784   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4785 
4786   // See if bytecode has already been quicked
4787   __ add(Rtemp, Rtags, Rindex);
4788 #ifdef AARCH64
4789   // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
4790   __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
4791   __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
4792 #else
4793   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4794 #endif // AARCH64
4795   __ cmp(Rtemp, JVM_CONSTANT_Class);
4796 
4797 #ifndef AARCH64
4798   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4799 #endif // !AARCH64
4800 
4801   __ b(quicked, eq);
4802 
4803   __ push(atos);
4804   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4805   // vm_result_2 has metadata result
4806   __ get_vm_result_2(Rsuper, Robj);
4807   __ pop_ptr(Robj);
4808   __ b(resolved);
4809 
4810   // Get superklass in Rsuper and subklass in Rsub
4811   __ bind(quicked);
4812   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4813 
4814   __ bind(resolved);
4815   __ load_klass(Rsub, Robj);
4816 
4817   // Generate subtype check. Blows both tmps and Rtemp.
4818   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4819 
4820   // Come here on success
4821   __ mov(R0_tos, 1);
4822   __ b(done);
4823 
4824   __ bind(not_subtype);
4825   // Come here on failure
4826   __ profile_typecheck_failed(R1_tmp);
4827   __ mov(R0_tos, 0);
4828 
4829   // Collect counts on whether this test sees NULLs a lot or not.
4830   if (ProfileInterpreter) {
4831     __ b(done);
4832     __ bind(is_null);
4833     __ profile_null_seen(R1_tmp);
4834   } else {
4835     __ bind(is_null);   // same as 'done'
4836   }
4837   __ bind(done);
4838 }
4839 
4840 
4841 //----------------------------------------------------------------------------------------------------
4842 // Breakpoints
4843 void TemplateTable::_breakpoint() {
4844 
4845   // Note: We get here even if we are single stepping.
4846   // jbug insists on setting breakpoints at every bytecode
4847   // even if we are in single step mode.
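  //
  // Overall flow (illustrative sketch): fetch the original bytecode that the
  // breakpoint replaced, post the breakpoint event, then dispatch the original
  // bytecode as if it were still at bcp:
  //
  //   orig = InterpreterRuntime::get_original_bytecode_at(method, bcp);
  //   InterpreterRuntime::_breakpoint(method, bcp);
  //   dispatch(orig);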
4848 
4849   transition(vtos, vtos);
4850 
4851   // get the unpatched byte code
4852   __ mov(R1, Rmethod);
4853   __ mov(R2, Rbcp);
4854   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4855 #ifdef AARCH64
4856   __ sxtw(Rtmp_save0, R0);
4857 #else
4858   __ mov(Rtmp_save0, R0);
4859 #endif // AARCH64
4860 
4861   // post the breakpoint event
4862   __ mov(R1, Rmethod);
4863   __ mov(R2, Rbcp);
4864   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4865 
4866   // complete the execution of original bytecode
4867   __ mov(R3_bytecode, Rtmp_save0);
4868   __ dispatch_only_normal(vtos);
4869 }
4870 
4871 
4872 //----------------------------------------------------------------------------------------------------
4873 // Exceptions
4874 
4875 void TemplateTable::athrow() {
4876   transition(atos, vtos);
4877   __ mov(Rexception_obj, R0_tos);
4878   __ null_check(Rexception_obj, Rtemp);
4879   __ b(Interpreter::throw_exception_entry());
4880 }
4881 
4882 
4883 //----------------------------------------------------------------------------------------------------
4884 // Synchronization
4885 //
4886 // Note: monitorenter & exit are symmetric routines, which is reflected
4887 //       in the assembly code structure as well
4888 //
4889 // Stack layout:
4890 //
4891 // [expressions  ] <--- Rstack_top        = expression stack top
4892 // ..
4893 // [expressions  ]
4894 // [monitor entry] <--- monitor block top = expression stack bot
4895 // ..
4896 // [monitor entry]
4897 // [frame data   ] <--- monitor block bot
4898 // ...
4899 // [saved FP     ] <--- FP
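//
// Illustrative pseudocode for the free-slot search that monitorenter() emits below
// (a sketch only, not part of the generated code; entries are BasicObjectLocks laid
// out as drawn above):
//
//   BasicObjectLock* entry = NULL;
//   for (cur = monitor block top; cur != monitor block bottom; cur += entry_size) {
//     if (cur->obj() == NULL) entry = cur;     // remember a free slot
//     if (cur->obj() == obj)  break;           // stop once an entry for 'obj' is seen
//   }
//   if (entry == NULL) entry = newly allocated entry (the expression stack is shifted down);
//   entry->set_obj(obj);  lock_object(entry);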
4900 
4901 
4902 void TemplateTable::monitorenter() {
4903   transition(atos, vtos);
4904 
4905   const Register Robj = R0_tos;
4906   const Register Rentry = R1_tmp;
4907 
4908   // check for NULL object
4909   __ null_check(Robj, Rtemp);
4910 
4911   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4912   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4913   Label allocate_monitor, allocated;
4914 
4915   // initialize entry pointer
4916   __ mov(Rentry, 0);                             // points to free slot or NULL
4917 
4918   // find a free slot in the monitor block (result in Rentry)
4919   { Label loop, exit;
4920     const Register Rcur = R2_tmp;
4921     const Register Rcur_obj = Rtemp;
4922     const Register Rbottom = R3_tmp;
4923     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4924 
4925     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4926                                  // points to current entry, starting with top-most entry
4927     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4928                                  // points to word before bottom of monitor block
4929 
4930     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4931 #ifndef AARCH64
4932     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4933                                                  // prefetch monitor's object for the first iteration
4934 #endif // !AARCH64
4935     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4936 
4937     __ bind(loop);
4938 #ifdef AARCH64
4939     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
4940 #endif // AARCH64
4941     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4942     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4943 
4944     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4945     __ b(exit, eq);                              // if same object then stop searching
4946 
4947     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4948 
4949     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4950 #ifndef AARCH64
4951     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4952                                                  // prefetch monitor's object for the next iteration
4953 #endif // !AARCH64
4954     __ b(loop, ne);                              // if not at bottom then check this entry
4955     __ bind(exit);
4956   }
4957 
4958   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4959 
4960   __ bind(allocate_monitor);
4961 
4962   // allocate one if there's no free slot
4963   { Label loop;
4964     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4965 
4966     // 1. compute new pointers
4967 
4968 #ifdef AARCH64
4969     __ check_extended_sp(Rtemp);
4970     __ sub(SP, SP, entry_size);                  // adjust extended SP
4971     __ mov(Rtemp, SP);
4972     __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
4973 #endif // AARCH64
4974 
4975     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4976                                                  // old monitor block top / expression stack bottom
4977 
4978     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4979     __ check_stack_top_on_expansion();
4980 
4981     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4982 
4983     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4984 
4985     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4986                                                  // set new monitor block top
4987 
4988     // 2. move expression stack contents
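    //
    // Roughly (illustrative sketch): each expression stack word is copied one
    // entry_size lower, so the vacated area at the old stack bottom becomes the
    // new monitor entry:
    //
    //   for (p = new stack top; p != new monitor block top; p += wordSize)
    //     *p = *(p + entry_size);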
4989 
4990     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4991 #ifndef AARCH64
4992     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4993 #endif // !AARCH64
4994     __ b(allocated, eq);
4995 
4996     __ bind(loop);
4997 #ifdef AARCH64
4998     __ ldr(Rtemp, Address(R2_tmp, entry_size));             // load expression stack word from old location
4999 #endif // AARCH64
5000     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
5001                                                             // and advance to next word
5002     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
5003 #ifndef AARCH64
5004     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
5005 #endif // !AARCH64
5006     __ b(loop, ne);                                         // if not at bottom then copy next word
5007   }
5008 
5009   // call run-time routine
5010 
5011   // Rentry: points to monitor entry
5012   __ bind(allocated);
5013 
5014   // Increment bcp to point to the next bytecode, so that exception handling for asynchronous exceptions works correctly.
5015   // The object has already been popped from the stack, so the expression stack looks correct.
5016   __ add(Rbcp, Rbcp, 1);
5017 
5018   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
5019   __ lock_object(Rentry);
5020 
5021   // check to make sure this monitor doesn't cause stack overflow after locking
5022   __ save_bcp();  // in case of exception
5023   __ arm_stack_overflow_check(0, Rtemp);
5024 
5025   // The bcp has already been incremented. Just need to dispatch to next instruction.
5026   __ dispatch_next(vtos);
5027 }
5028 
5029 
5030 void TemplateTable::monitorexit() {
5031   transition(atos, vtos);
5032 
5033   const Register Robj = R0_tos;
5034   const Register Rcur = R1_tmp;
5035   const Register Rbottom = R2_tmp;
5036   const Register Rcur_obj = Rtemp;
5037 
5038   // check for NULL object
5039   __ null_check(Robj, Rtemp);
5040 
5041   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
5042   Label found, throw_exception;
5043 
5044   // find matching slot
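  // Illustrative sketch of the search below (not part of the generated code):
  //
  //   for (cur = monitor block top; cur != monitor block bottom; cur += entry_size)
  //     if (cur->obj() == obj) { unlock_object(cur); return; }
  //   throw IllegalMonitorStateException;      // unlocking was not block-structured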
5045   { Label loop;
5046     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
5047 
5048     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
5049                                  // points to current entry, starting with top-most entry
5050     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
5051                                  // points to word before bottom of monitor block
5052 
5053     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5054 #ifndef AARCH64
5055     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5056                                                  // prefetch monitor's object for the first iteration
5057 #endif // !AARCH64
5058     __ b(throw_exception, eq);                   // throw exception if there are no monitors
5059 
5060     __ bind(loop);
5061 #ifdef AARCH64
5062     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
5063 #endif // AARCH64
5064     // check if current entry is for same object
5065     __ cmp(Rcur_obj, Robj);
5066     __ b(found, eq);                             // if same object then stop searching
5067     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
5068     __ cmp(Rcur, Rbottom);                       // check if bottom reached
5069 #ifndef AARCH64
5070     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
5071 #endif // !AARCH64
5072     __ b (loop, ne);                             // if not at bottom then check this entry
5073   }
5074 
5075   // Error handling: unlocking was not block-structured
5076   __ bind(throw_exception);
5077   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
5078   __ should_not_reach_here();
5079 
5080   // call run-time routine
5081   // Rcur: points to monitor entry
5082   __ bind(found);
5083   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
5084   __ unlock_object(Rcur);
5085   __ pop_ptr(Robj);                              // discard object
5086 }
5087 
5088 
5089 //----------------------------------------------------------------------------------------------------
5090 // Wide instructions
5091 
5092 void TemplateTable::wide() {
5093   transition(vtos, vtos);
5094   __ ldrb(R3_bytecode, at_bcp(1));
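  // In effect (illustrative sketch): jump to Interpreter::_wentry_point[*(bcp + 1)],
  // i.e. dispatch to the wide variant of the bytecode that follows the 'wide' prefix.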
5095 
5096   InlinedAddress Ltable((address)Interpreter::_wentry_point);
5097   __ ldr_literal(Rtemp, Ltable);
5098   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
5099 
5100   __ nop(); // to avoid filling CPU pipeline with invalid instructions
5101   __ nop();
5102   __ bind_literal(Ltable);
5103 }
5104 
5105 
5106 //----------------------------------------------------------------------------------------------------
5107 // Multi arrays
5108 
5109 void TemplateTable::multianewarray() {
5110   transition(vtos, atos);
5111   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
5112 
5113   // The last dimension is on top of the stack; we want the address of the first one:
5114   //   first_addr = last_addr + ndims * stackElementSize - 1*wordSize
5115   // where the trailing wordSize is subtracted so that the pointer lands on the first dimension word.
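  //
  // Worked example (illustrative): with ndims == 3 and stackElementSize == wordSize,
  // the three dimension words start at Rstack_top, and
  // R1 = Rstack_top + 3*wordSize - wordSize points at the first (outermost) dimension.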
5116   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5117   __ sub(R1, Rtemp, wordSize);
5118 
5119   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
5120   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
5121   // No MacroAssembler::StoreStore barrier is needed here (one is included in the runtime exit path)
5122 }