1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "interpreter/interp_masm.hpp"
  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/cpCache.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 
  43 #define __ _masm->
  44 
  45 //----------------------------------------------------------------------------------------------------
  46 // Platform-dependent initialization
  47 
  48 void TemplateTable::pd_initialize() {
  49   // No arm specific initialization
  50 }
  51 
  52 //----------------------------------------------------------------------------------------------------
  53 // Address computation
  54 
  55 // local variables
  56 static inline Address iaddress(int n)            {
  57   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n)            { return iaddress(n + 1); }
  61 static inline Address haddress(int n)            { return iaddress(n + 0); }
  62 
  63 static inline Address faddress(int n)            { return iaddress(n); }
  64 static inline Address daddress(int n)            { return laddress(n); }
  65 static inline Address aaddress(int n)            { return iaddress(n); }
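// Note: locals are addressed downwards from Rlocals (see get_local_base_addr
// and the sub_offset addressing below), so a category-2 value in slots n and
// n+1 has its high word at slot n (haddress) and its low word at slot n+1
// (laddress); lload()/lstore() and load/store_category2_local rely on this.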
  66 
  67 
  68 void TemplateTable::get_local_base_addr(Register r, Register index) {
  69   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  70 }
  71 
  72 Address TemplateTable::load_iaddress(Register index, Register scratch) {
  73   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
  74 }
  75 
  76 Address TemplateTable::load_aaddress(Register index, Register scratch) {
  77   return load_iaddress(index, scratch);
  78 }
  79 
  80 Address TemplateTable::load_faddress(Register index, Register scratch) {
  81 #ifdef __SOFTFP__
  82   return load_iaddress(index, scratch);
  83 #else
  84   get_local_base_addr(scratch, index);
  85   return Address(scratch);
  86 #endif // __SOFTFP__
  87 }
  88 
  89 Address TemplateTable::load_daddress(Register index, Register scratch) {
  90   get_local_base_addr(scratch, index);
  91   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  92 }
  93 
  94 // At top of Java expression stack which may be different than SP.
  95 // It isn't for category 1 objects.
  96 static inline Address at_tos() {
  97   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
  98 }
  99 
 100 static inline Address at_tos_p1() {
 101   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
 102 }
 103 
 104 static inline Address at_tos_p2() {
 105   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
 106 }
 107 
 108 
 109 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
 110 // separate ldr instructions (supports nonadjacent values).
 111 // Used for longs in all modes, and for doubles in SOFTFP mode.
 112 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
 113   const Register Rlocal_base = tmp;
 114   assert_different_registers(Rlocal_index, tmp);
 115 
 116   get_local_base_addr(Rlocal_base, Rlocal_index);
 117   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 118   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 119 }
 120 
 121 
 122 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
 123 // separate str instructions (supports nonadjacent values).
 124 // Used for longs in all modes, and for doubles in SOFTFP mode
 125 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
 126   const Register Rlocal_base = tmp;
 127   assert_different_registers(Rlocal_index, tmp);
 128 
 129   get_local_base_addr(Rlocal_base, Rlocal_index);
 130   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
 131   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
 132 }
 133 
 134 // Returns address of Java array element using temp register as address base.
 135 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
 136   int logElemSize = exact_log2(type2aelembytes(elemType));
 137   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
 138   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 139 }
 140 
 141 // Returns address of Java array element using temp register as offset from array base
 142 Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
 143   int logElemSize = exact_log2(type2aelembytes(elemType));
 144   if (logElemSize == 0) {
 145     __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
 146   } else {
 147     __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 148     __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
 149   }
 150   return Address(array, temp);
 151 }
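// The two helpers above compute the same element address but differ in the
// base register: get_array_elem_addr leaves array + scaled_index in temp,
// while get_array_elem_addr_same_base puts header_offset + scaled_index into
// temp and keeps the array oop itself as the base; the latter form is what the
// barrier-aware access_load_at/access_store_at calls below are given.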
 152 
 153 //----------------------------------------------------------------------------------------------------
 154 // Condition conversion
 155 AsmCondition convNegCond(TemplateTable::Condition cc) {
 156   switch (cc) {
 157     case TemplateTable::equal        : return ne;
 158     case TemplateTable::not_equal    : return eq;
 159     case TemplateTable::less         : return ge;
 160     case TemplateTable::less_equal   : return gt;
 161     case TemplateTable::greater      : return le;
 162     case TemplateTable::greater_equal: return lt;
 163   }
 164   ShouldNotReachHere();
 165   return nv;
 166 }
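// convNegCond maps a bytecode comparison to the ARM condition of its negation
// (e.g. equal -> ne); callers use it to branch when the tested condition is
// false.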
 167 
 168 //----------------------------------------------------------------------------------------------------
 169 // Miscellaneous helper routines
 170 
 171 // Store an oop (or NULL) at the address described by obj.
 172 // Blows all volatile registers (R0-R3, Rtemp, LR).
 173 // Also destroys new_val and obj.base().
 174 static void do_oop_store(InterpreterMacroAssembler* _masm,
 175                          Address obj,
 176                          Register new_val,
 177                          Register tmp1,
 178                          Register tmp2,
 179                          Register tmp3,
 180                          bool is_null,
 181                          DecoratorSet decorators = 0) {
 182 
 183   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
 184   if (is_null) {
 185     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
 186   } else {
 187     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
 188   }
 189 }
 190 
 191 static void do_oop_load(InterpreterMacroAssembler* _masm,
 192                         Register dst,
 193                         Address obj,
 194                         DecoratorSet decorators = 0) {
 195   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
 196 }
 197 
 198 Address TemplateTable::at_bcp(int offset) {
 199   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 200   return Address(Rbcp, offset);
 201 }
 202 
 203 
 204 // Blows volatile registers R0-R3, Rtemp, LR.
 205 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 206                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 207                                    int byte_no) {
 208   assert_different_registers(bc_reg, temp_reg);
 209   if (!RewriteBytecodes)  return;
 210   Label L_patch_done;
 211 
 212   switch (bc) {
 213   case Bytecodes::_fast_aputfield:
 214   case Bytecodes::_fast_bputfield:
 215   case Bytecodes::_fast_zputfield:
 216   case Bytecodes::_fast_cputfield:
 217   case Bytecodes::_fast_dputfield:
 218   case Bytecodes::_fast_fputfield:
 219   case Bytecodes::_fast_iputfield:
 220   case Bytecodes::_fast_lputfield:
 221   case Bytecodes::_fast_sputfield:
 222     {
 223       // We skip bytecode quickening for putfield instructions when
 224       // the put_code written to the constant pool cache is zero.
 225       // This is required so that every execution of this instruction
 226       // calls out to InterpreterRuntime::resolve_get_put to do
 227       // additional, required work.
 228       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 229       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 230       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
 231       __ mov(bc_reg, bc);
 232       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
 233     }
 234     break;
 235   default:
 236     assert(byte_no == -1, "sanity");
 237     // the pair bytecodes have already done the load.
 238     if (load_bc_into_bc_reg) {
 239       __ mov(bc_reg, bc);
 240     }
 241   }
 242 
 243   if (__ can_post_breakpoint()) {
 244     Label L_fast_patch;
 245     // if a breakpoint is present we can't rewrite the stream directly
 246     __ ldrb(temp_reg, at_bcp(0));
 247     __ cmp(temp_reg, Bytecodes::_breakpoint);
 248     __ b(L_fast_patch, ne);
 249     if (bc_reg != R3) {
 250       __ mov(R3, bc_reg);
 251     }
 252     __ mov(R1, Rmethod);
 253     __ mov(R2, Rbcp);
 254     // Let breakpoint table handling rewrite to quicker bytecode
 255     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
 256     __ b(L_patch_done);
 257     __ bind(L_fast_patch);
 258   }
 259 
 260 #ifdef ASSERT
 261   Label L_okay;
 262   __ ldrb(temp_reg, at_bcp(0));
 263   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
 264   __ b(L_okay, eq);
 265   __ cmp(temp_reg, bc_reg);
 266   __ b(L_okay, eq);
 267   __ stop("patching the wrong bytecode");
 268   __ bind(L_okay);
 269 #endif
 270 
 271   // patch bytecode
 272   __ strb(bc_reg, at_bcp(0));
 273   __ bind(L_patch_done);
 274 }
 275 
 276 //----------------------------------------------------------------------------------------------------
 277 // Individual instructions
 278 
 279 void TemplateTable::nop() {
 280   transition(vtos, vtos);
 281   // nothing to do
 282 }
 283 
 284 void TemplateTable::shouldnotreachhere() {
 285   transition(vtos, vtos);
 286   __ stop("shouldnotreachhere bytecode");
 287 }
 288 
 289 
 290 
 291 void TemplateTable::aconst_null() {
 292   transition(vtos, atos);
 293   __ mov(R0_tos, 0);
 294 }
 295 
 296 
 297 void TemplateTable::iconst(int value) {
 298   transition(vtos, itos);
 299   __ mov_slow(R0_tos, value);
 300 }
 301 
 302 
 303 void TemplateTable::lconst(int value) {
 304   transition(vtos, ltos);
 305   assert((value == 0) || (value == 1), "unexpected long constant");
 306   __ mov(R0_tos, value);
 307   __ mov(R1_tos_hi, 0);
 308 }
 309 
 310 
 311 void TemplateTable::fconst(int value) {
 312   transition(vtos, ftos);
 313   const int zero = 0;         // 0.0f
 314   const int one = 0x3f800000; // 1.0f
 315   const int two = 0x40000000; // 2.0f
 316 
 317   switch(value) {
 318   case 0:   __ mov(R0_tos, zero);   break;
 319   case 1:   __ mov(R0_tos, one);    break;
 320   case 2:   __ mov(R0_tos, two);    break;
 321   default:  ShouldNotReachHere();   break;
 322   }
 323 
 324 #ifndef __SOFTFP__
 325   __ fmsr(S0_tos, R0_tos);
 326 #endif // !__SOFTFP__
 327 }
 328 
 329 
 330 void TemplateTable::dconst(int value) {
 331   transition(vtos, dtos);
 332   const int one_lo = 0;            // low part of 1.0
 333   const int one_hi = 0x3ff00000;   // high part of 1.0
 334 
 335   if (value == 0) {
 336 #ifdef __SOFTFP__
 337     __ mov(R0_tos_lo, 0);
 338     __ mov(R1_tos_hi, 0);
 339 #else
 340     __ mov(R0_tmp, 0);
 341     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
 342 #endif // __SOFTFP__
 343   } else if (value == 1) {
 344     __ mov(R0_tos_lo, one_lo);
 345     __ mov_slow(R1_tos_hi, one_hi);
 346 #ifndef __SOFTFP__
 347     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
 348 #endif // !__SOFTFP__
 349   } else {
 350     ShouldNotReachHere();
 351   }
 352 }
 353 
 354 
 355 void TemplateTable::bipush() {
 356   transition(vtos, itos);
 357   __ ldrsb(R0_tos, at_bcp(1));
 358 }
 359 
 360 
 361 void TemplateTable::sipush() {
 362   transition(vtos, itos);
 363   __ ldrsb(R0_tmp, at_bcp(1));
 364   __ ldrb(R1_tmp, at_bcp(2));
 365   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
 366 }
 367 
 368 
 369 void TemplateTable::ldc(bool wide) {
 370   transition(vtos, vtos);
 371   Label fastCase, Condy, Done;
 372 
 373   const Register Rindex = R1_tmp;
 374   const Register Rcpool = R2_tmp;
 375   const Register Rtags  = R3_tmp;
 376   const Register RtagType = R3_tmp;
 377 
 378   if (wide) {
 379     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 380   } else {
 381     __ ldrb(Rindex, at_bcp(1));
 382   }
 383   __ get_cpool_and_tags(Rcpool, Rtags);
 384 
 385   const int base_offset = ConstantPool::header_size() * wordSize;
 386   const int tags_offset = Array<u1>::base_offset_in_bytes();
 387 
 388   // get const type
 389   __ add(Rtemp, Rtags, tags_offset);
 390   __ ldrb(RtagType, Address(Rtemp, Rindex));
 391   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
 392 
 393   // unresolved class - get the resolved class
 394   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
 395 
 396   // unresolved class in error (resolution failed) - call into runtime
 397   // so that the same error from first resolution attempt is thrown.
 398   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
 399 
 400   // resolved class - need to call vm to get java mirror of the class
 401   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
 402 
 403   __ b(fastCase, ne);
 404 
 405   // slow case - call runtime
 406   __ mov(R1, wide);
 407   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
 408   __ push(atos);
 409   __ b(Done);
 410 
 411   // int, float, String
 412   __ bind(fastCase);
 413 
 414   __ cmp(RtagType, JVM_CONSTANT_Integer);
 415   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
 416   __ b(Condy, ne);
 417 
 418   // itos, ftos
 419   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 420   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
 421 
 422   // floats and ints are placed on stack in the same way, so
 423   // we can use push(itos) to transfer float value without VFP
 424   __ push(itos);
 425   __ b(Done);
 426 
 427   __ bind(Condy);
 428   condy_helper(Done);
 429 
 430   __ bind(Done);
 431 }
 432 
 433 // Fast path for caching oop constants.
 434 void TemplateTable::fast_aldc(bool wide) {
 435   transition(vtos, atos);
 436   int index_size = wide ? sizeof(u2) : sizeof(u1);
 437   Label resolved;
 438 
 439   // We are resolved if the resolved reference cache entry contains a
 440   // non-null object (CallSite, etc.)
 441   assert_different_registers(R0_tos, R2_tmp);
 442   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
 443   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
 444   __ cbnz(R0_tos, resolved);
 445 
 446   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 447 
 448   // first time invocation - must resolve first
 449   __ mov(R1, (int)bytecode());
 450   __ call_VM(R0_tos, entry, R1);
 451   __ bind(resolved);
 452 
 453   { // Check for the null sentinel.
 454     // If we just called the VM, that already did the mapping for us,
 455     // but it's harmless to retry.
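    // (the resolved_references entry for a constant that resolved to null is
    // Universe::the_null_sentinel(); a match below is turned back into a real
    // NULL reference)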
 456     Label notNull;
 457     Register result = R0;
 458     Register tmp = R1;
 459     Register rarg = R2;
 460 
 461     // Stash null_sentinel address to get its value later
 462     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
 463     __ ldr(tmp, Address(rarg));
 464     __ cmp(result, tmp);
 465     __ b(notNull, ne);
 466     __ mov(result, 0);  // NULL object reference
 467     __ bind(notNull);
 468   }
 469 
 470   if (VerifyOops) {
 471     __ verify_oop(R0_tos);
 472   }
 473 }
 474 
 475 void TemplateTable::ldc2_w() {
 476   transition(vtos, vtos);
 477   const Register Rtags  = R2_tmp;
 478   const Register Rindex = R3_tmp;
 479   const Register Rcpool = R4_tmp;
 480   const Register Rbase  = R5_tmp;
 481 
 482   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
 483 
 484   __ get_cpool_and_tags(Rcpool, Rtags);
 485   const int base_offset = ConstantPool::header_size() * wordSize;
 486   const int tags_offset = Array<u1>::base_offset_in_bytes();
 487 
 488   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
 489 
 490   // get type from tags (needed below for both the Double and the Long checks)
 491   __ add(Rtemp, Rtags, tags_offset);
 492   __ ldrb(Rtemp, Address(Rtemp, Rindex));
 493   Label Condy, exit;
 494 #ifdef __ABI_HARD__
 495   Label Long;
 496   __ cmp(Rtemp, JVM_CONSTANT_Double);
 497   __ b(Long, ne);
 498   __ ldr_double(D0_tos, Address(Rbase, base_offset));
 499 
 500   __ push(dtos);
 501   __ b(exit);
 502   __ bind(Long);
 503 #endif
 504 
 505   __ cmp(Rtemp, JVM_CONSTANT_Long);
 506   __ b(Condy, ne);
 507   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
 508   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
 509   __ push(ltos);
 510   __ b(exit);
 511 
 512   __ bind(Condy);
 513   condy_helper(exit);
 514 
 515   __ bind(exit);
 516 }
 517 
 518 
 519 void TemplateTable::condy_helper(Label& Done)
 520 {
 521   Register obj   = R0_tmp;
 522   Register rtmp  = R1_tmp;
 523   Register flags = R2_tmp;
 524   Register off   = R3_tmp;
 525 
 526   __ mov(rtmp, (int) bytecode());
 527   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
 528   __ get_vm_result_2(flags, rtmp);
 529 
 530   // VMr = obj = base address to find primitive value to push
 531   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 532   __ mov(off, flags);
 533 
 534   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
 535   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
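  // off now holds the low field_index_bits bits of flags: the byte offset of
  // the constant value from obj (used to form 'field' below)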
 536 
 537   const Address field(obj, off);
 538 
 539   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
 540   // Make sure we don't need to mask flags after the above shift
 541   ConstantPoolCacheEntry::verify_tos_state_shift();
 542 
 543   switch (bytecode()) {
 544     case Bytecodes::_ldc:
 545     case Bytecodes::_ldc_w:
 546       {
 547         // tos in (itos, ftos, stos, btos, ctos, ztos)
 548         Label notIntFloat, notShort, notByte, notChar, notBool;
 549         __ cmp(flags, itos);
 550         __ cond_cmp(flags, ftos, ne);
 551         __ b(notIntFloat, ne);
 552         __ ldr(R0_tos, field);
 553         __ push(itos);
 554         __ b(Done);
 555 
 556         __ bind(notIntFloat);
 557         __ cmp(flags, stos);
 558         __ b(notShort, ne);
 559         __ ldrsh(R0_tos, field);
 560         __ push(stos);
 561         __ b(Done);
 562 
 563         __ bind(notShort);
 564         __ cmp(flags, btos);
 565         __ b(notByte, ne);
 566         __ ldrsb(R0_tos, field);
 567         __ push(btos);
 568         __ b(Done);
 569 
 570         __ bind(notByte);
 571         __ cmp(flags, ctos);
 572         __ b(notChar, ne);
 573         __ ldrh(R0_tos, field);
 574         __ push(ctos);
 575         __ b(Done);
 576 
 577         __ bind(notChar);
 578         __ cmp(flags, ztos);
 579         __ b(notBool, ne);
 580         __ ldrsb(R0_tos, field);
 581         __ push(ztos);
 582         __ b(Done);
 583 
 584         __ bind(notBool);
 585         break;
 586       }
 587 
 588     case Bytecodes::_ldc2_w:
 589       {
 590         Label notLongDouble;
 591         __ cmp(flags, ltos);
 592         __ cond_cmp(flags, dtos, ne);
 593         __ b(notLongDouble, ne);
 594 
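        // low word at obj+off, high word one wordSize above it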
 595         __ add(rtmp, obj, wordSize);
 596         __ ldr(R0_tos_lo, Address(obj, off));
 597         __ ldr(R1_tos_hi, Address(rtmp, off));
 598         __ push(ltos);
 599         __ b(Done);
 600 
 601         __ bind(notLongDouble);
 602 
 603         break;
 604       }
 605 
 606     default:
 607       ShouldNotReachHere();
 608     }
 609 
 610     __ stop("bad ldc/condy");
 611 }
 612 
 613 
 614 void TemplateTable::locals_index(Register reg, int offset) {
 615   __ ldrb(reg, at_bcp(offset));
 616 }
 617 
 618 void TemplateTable::iload() {
 619   iload_internal();
 620 }
 621 
 622 void TemplateTable::nofast_iload() {
 623   iload_internal(may_not_rewrite);
 624 }
 625 
 626 void TemplateTable::iload_internal(RewriteControl rc) {
 627   transition(vtos, itos);
 628 
 629   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
 630     Label rewrite, done;
 631     const Register next_bytecode = R1_tmp;
 632     const Register target_bytecode = R2_tmp;
 633 
 634     // get next byte
 635     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 636     // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite the
 637     // last two iloads in a pair.  A bytecode is only rewritten to fast_iload
 638     // when the bytecode after it is neither an iload nor a caload, so seeing
 639     // fast_iload next means this iload and the next one form a complete pair.
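    // Example: for "iload a; iload b; iadd" the second iload is rewritten to
    // fast_iload first (its successor is iadd), and on a later execution the
    // first iload sees fast_iload ahead and becomes fast_iload2, which loads
    // both locals.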
 640     __ cmp(next_bytecode, Bytecodes::_iload);
 641     __ b(done, eq);
 642 
 643     __ cmp(next_bytecode, Bytecodes::_fast_iload);
 644     __ mov(target_bytecode, Bytecodes::_fast_iload2);
 645     __ b(rewrite, eq);
 646 
 647     // if _caload, rewrite to fast_icaload
 648     __ cmp(next_bytecode, Bytecodes::_caload);
 649     __ mov(target_bytecode, Bytecodes::_fast_icaload);
 650     __ b(rewrite, eq);
 651 
 652     // rewrite so iload doesn't check again.
 653     __ mov(target_bytecode, Bytecodes::_fast_iload);
 654 
 655     // rewrite
 656     // R2: fast bytecode
 657     __ bind(rewrite);
 658     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
 659     __ bind(done);
 660   }
 661 
 662   // Get the local value into tos
 663   const Register Rlocal_index = R1_tmp;
 664   locals_index(Rlocal_index);
 665   Address local = load_iaddress(Rlocal_index, Rtemp);
 666   __ ldr_s32(R0_tos, local);
 667 }
 668 
 669 
 670 void TemplateTable::fast_iload2() {
 671   transition(vtos, itos);
 672   const Register Rlocal_index = R1_tmp;
 673 
 674   locals_index(Rlocal_index);
 675   Address local = load_iaddress(Rlocal_index, Rtemp);
 676   __ ldr_s32(R0_tos, local);
 677   __ push(itos);
 678 
 679   locals_index(Rlocal_index, 3);
 680   local = load_iaddress(Rlocal_index, Rtemp);
 681   __ ldr_s32(R0_tos, local);
 682 }
 683 
 684 void TemplateTable::fast_iload() {
 685   transition(vtos, itos);
 686   const Register Rlocal_index = R1_tmp;
 687 
 688   locals_index(Rlocal_index);
 689   Address local = load_iaddress(Rlocal_index, Rtemp);
 690   __ ldr_s32(R0_tos, local);
 691 }
 692 
 693 
 694 void TemplateTable::lload() {
 695   transition(vtos, ltos);
 696   const Register Rlocal_index = R2_tmp;
 697 
 698   locals_index(Rlocal_index);
 699   load_category2_local(Rlocal_index, R3_tmp);
 700 }
 701 
 702 
 703 void TemplateTable::fload() {
 704   transition(vtos, ftos);
 705   const Register Rlocal_index = R2_tmp;
 706 
 707   // Get the local value into tos
 708   locals_index(Rlocal_index);
 709   Address local = load_faddress(Rlocal_index, Rtemp);
 710 #ifdef __SOFTFP__
 711   __ ldr(R0_tos, local);
 712 #else
 713   __ ldr_float(S0_tos, local);
 714 #endif // __SOFTFP__
 715 }
 716 
 717 
 718 void TemplateTable::dload() {
 719   transition(vtos, dtos);
 720   const Register Rlocal_index = R2_tmp;
 721 
 722   locals_index(Rlocal_index);
 723 
 724 #ifdef __SOFTFP__
 725   load_category2_local(Rlocal_index, R3_tmp);
 726 #else
 727   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 728 #endif // __SOFTFP__
 729 }
 730 
 731 
 732 void TemplateTable::aload() {
 733   transition(vtos, atos);
 734   const Register Rlocal_index = R1_tmp;
 735 
 736   locals_index(Rlocal_index);
 737   Address local = load_aaddress(Rlocal_index, Rtemp);
 738   __ ldr(R0_tos, local);
 739 }
 740 
 741 
 742 void TemplateTable::locals_index_wide(Register reg) {
 743   assert_different_registers(reg, Rtemp);
 744   __ ldrb(Rtemp, at_bcp(2));
 745   __ ldrb(reg, at_bcp(3));
 746   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
 747 }
 748 
 749 
 750 void TemplateTable::wide_iload() {
 751   transition(vtos, itos);
 752   const Register Rlocal_index = R2_tmp;
 753 
 754   locals_index_wide(Rlocal_index);
 755   Address local = load_iaddress(Rlocal_index, Rtemp);
 756   __ ldr_s32(R0_tos, local);
 757 }
 758 
 759 
 760 void TemplateTable::wide_lload() {
 761   transition(vtos, ltos);
 762   const Register Rlocal_index = R2_tmp;
 763   const Register Rlocal_base = R3_tmp;
 764 
 765   locals_index_wide(Rlocal_index);
 766   load_category2_local(Rlocal_index, R3_tmp);
 767 }
 768 
 769 
 770 void TemplateTable::wide_fload() {
 771   transition(vtos, ftos);
 772   const Register Rlocal_index = R2_tmp;
 773 
 774   locals_index_wide(Rlocal_index);
 775   Address local = load_faddress(Rlocal_index, Rtemp);
 776 #ifdef __SOFTFP__
 777   __ ldr(R0_tos, local);
 778 #else
 779   __ ldr_float(S0_tos, local);
 780 #endif // __SOFTFP__
 781 }
 782 
 783 
 784 void TemplateTable::wide_dload() {
 785   transition(vtos, dtos);
 786   const Register Rlocal_index = R2_tmp;
 787 
 788   locals_index_wide(Rlocal_index);
 789 #ifdef __SOFTFP__
 790   load_category2_local(Rlocal_index, R3_tmp);
 791 #else
 792   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
 793 #endif // __SOFTFP__
 794 }
 795 
 796 
 797 void TemplateTable::wide_aload() {
 798   transition(vtos, atos);
 799   const Register Rlocal_index = R2_tmp;
 800 
 801   locals_index_wide(Rlocal_index);
 802   Address local = load_aaddress(Rlocal_index, Rtemp);
 803   __ ldr(R0_tos, local);
 804 }
 805 
 806 void TemplateTable::index_check(Register array, Register index) {
 807   // Pop ptr into array
 808   __ pop_ptr(array);
 809   index_check_without_pop(array, index);
 810 }
 811 
 812 void TemplateTable::index_check_without_pop(Register array, Register index) {
 813   assert_different_registers(array, index, Rtemp);
 814   // check array
 815   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
 816   // check index
 817   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
 818   __ cmp_32(index, Rtemp);
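  // unsigned compare: a negative index appears as a very large unsigned value,
  // so the single 'hs' (unsigned >=) check below catches both negative and
  // too-large indices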
 819   if (index != R4_ArrayIndexOutOfBounds_index) {
 820     // convention with generate_ArrayIndexOutOfBounds_handler()
 821     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
 822   }
 823   __ mov(R1, array, hs);
 824   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
 825 }
 826 
 827 
 828 void TemplateTable::iaload() {
 829   transition(itos, itos);
 830   const Register Rarray = R1_tmp;
 831   const Register Rindex = R0_tos;
 832 
 833   index_check(Rarray, Rindex);
 834   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
 835   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 836 }
 837 
 838 
 839 void TemplateTable::laload() {
 840   transition(itos, ltos);
 841   const Register Rarray = R1_tmp;
 842   const Register Rindex = R0_tos;
 843 
 844   index_check(Rarray, Rindex);
 845 
 846   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
 847   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
 848 }
 849 
 850 
 851 void TemplateTable::faload() {
 852   transition(itos, ftos);
 853   const Register Rarray = R1_tmp;
 854   const Register Rindex = R0_tos;
 855 
 856   index_check(Rarray, Rindex);
 857 
 858   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
 859   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
 860 }
 861 
 862 
 863 void TemplateTable::daload() {
 864   transition(itos, dtos);
 865   const Register Rarray = R1_tmp;
 866   const Register Rindex = R0_tos;
 867 
 868   index_check(Rarray, Rindex);
 869 
 870   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
 871   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
 872 }
 873 
 874 
 875 void TemplateTable::aaload() {
 876   transition(itos, atos);
 877   const Register Rarray = R1_tmp;
 878   const Register Rindex = R0_tos;
 879 
 880   index_check(Rarray, Rindex);
 881   do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
 882 }
 883 
 884 
 885 void TemplateTable::baload() {
 886   transition(itos, itos);
 887   const Register Rarray = R1_tmp;
 888   const Register Rindex = R0_tos;
 889 
 890   index_check(Rarray, Rindex);
 891   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
 892   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 893 }
 894 
 895 
 896 void TemplateTable::caload() {
 897   transition(itos, itos);
 898   const Register Rarray = R1_tmp;
 899   const Register Rindex = R0_tos;
 900 
 901   index_check(Rarray, Rindex);
 902   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
 903   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 904 }
 905 
 906 
 907 // iload followed by caload frequent pair
 908 void TemplateTable::fast_icaload() {
 909   transition(vtos, itos);
 910   const Register Rlocal_index = R1_tmp;
 911   const Register Rarray = R1_tmp;
 912   const Register Rindex = R4_tmp; // index_check prefers index in R4
 913   assert_different_registers(Rlocal_index, Rindex);
 914   assert_different_registers(Rarray, Rindex);
 915 
 916   // load index out of locals
 917   locals_index(Rlocal_index);
 918   Address local = load_iaddress(Rlocal_index, Rtemp);
 919   __ ldr_s32(Rindex, local);
 920 
 921   // get array element
 922   index_check(Rarray, Rindex);
 923   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
 924   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 925 }
 926 
 927 
 928 void TemplateTable::saload() {
 929   transition(itos, itos);
 930   const Register Rarray = R1_tmp;
 931   const Register Rindex = R0_tos;
 932 
 933   index_check(Rarray, Rindex);
 934   Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
 935   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 936 }
 937 
 938 
 939 void TemplateTable::iload(int n) {
 940   transition(vtos, itos);
 941   __ ldr_s32(R0_tos, iaddress(n));
 942 }
 943 
 944 
 945 void TemplateTable::lload(int n) {
 946   transition(vtos, ltos);
 947   __ ldr(R0_tos_lo, laddress(n));
 948   __ ldr(R1_tos_hi, haddress(n));
 949 }
 950 
 951 
 952 void TemplateTable::fload(int n) {
 953   transition(vtos, ftos);
 954 #ifdef __SOFTFP__
 955   __ ldr(R0_tos, faddress(n));
 956 #else
 957   __ ldr_float(S0_tos, faddress(n));
 958 #endif // __SOFTFP__
 959 }
 960 
 961 
 962 void TemplateTable::dload(int n) {
 963   transition(vtos, dtos);
 964 #ifdef __SOFTFP__
 965   __ ldr(R0_tos_lo, laddress(n));
 966   __ ldr(R1_tos_hi, haddress(n));
 967 #else
 968   __ ldr_double(D0_tos, daddress(n));
 969 #endif // __SOFTFP__
 970 }
 971 
 972 
 973 void TemplateTable::aload(int n) {
 974   transition(vtos, atos);
 975   __ ldr(R0_tos, aaddress(n));
 976 }
 977 
 978 void TemplateTable::aload_0() {
 979   aload_0_internal();
 980 }
 981 
 982 void TemplateTable::nofast_aload_0() {
 983   aload_0_internal(may_not_rewrite);
 984 }
 985 
 986 void TemplateTable::aload_0_internal(RewriteControl rc) {
 987   transition(vtos, atos);
 988   // According to bytecode histograms, the pairs:
 989   //
 990   // _aload_0, _fast_igetfield
 991   // _aload_0, _fast_agetfield
 992   // _aload_0, _fast_fgetfield
 993   //
 994   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 995   // bytecode checks if the next bytecode is either _fast_igetfield,
 996   // _fast_agetfield or _fast_fgetfield and then rewrites the
 997   // current bytecode into a pair bytecode; otherwise it rewrites the current
 998   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 999   //
1000   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
1001   //       otherwise we may miss an opportunity for a pair.
1002   //
1003   // Also rewrite frequent pairs
1004   //   aload_0, aload_1
1005   //   aload_0, iload_1
 1006   // These pairs need only a small amount of code, so they are the most profitable to rewrite
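  // For example, once "aload_0; getfield" has been quickened to
  // "aload_0; fast_igetfield", the aload_0 is rewritten here to
  // fast_iaccess_0, whose template loads 'this' and performs the int field
  // access in one step.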
1007   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1008     Label rewrite, done;
1009     const Register next_bytecode = R1_tmp;
1010     const Register target_bytecode = R2_tmp;
1011 
1012     // get next byte
1013     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1014 
1015     // if _getfield then wait with rewrite
1016     __ cmp(next_bytecode, Bytecodes::_getfield);
1017     __ b(done, eq);
1018 
1019     // if _igetfield then rewrite to _fast_iaccess_0
1020     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1021     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1022     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1023     __ b(rewrite, eq);
1024 
1025     // if _agetfield then rewrite to _fast_aaccess_0
1026     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1027     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1028     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1029     __ b(rewrite, eq);
1030 
 1031     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload_0
1032     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1033     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1034 
1035     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1036     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1037     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1038 
1039     // rewrite
1040     __ bind(rewrite);
1041     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1042 
1043     __ bind(done);
1044   }
1045 
1046   aload(0);
1047 }
1048 
1049 void TemplateTable::istore() {
1050   transition(itos, vtos);
1051   const Register Rlocal_index = R2_tmp;
1052 
1053   locals_index(Rlocal_index);
1054   Address local = load_iaddress(Rlocal_index, Rtemp);
1055   __ str_32(R0_tos, local);
1056 }
1057 
1058 
1059 void TemplateTable::lstore() {
1060   transition(ltos, vtos);
1061   const Register Rlocal_index = R2_tmp;
1062 
1063   locals_index(Rlocal_index);
1064   store_category2_local(Rlocal_index, R3_tmp);
1065 }
1066 
1067 
1068 void TemplateTable::fstore() {
1069   transition(ftos, vtos);
1070   const Register Rlocal_index = R2_tmp;
1071 
1072   locals_index(Rlocal_index);
1073   Address local = load_faddress(Rlocal_index, Rtemp);
1074 #ifdef __SOFTFP__
1075   __ str(R0_tos, local);
1076 #else
1077   __ str_float(S0_tos, local);
1078 #endif // __SOFTFP__
1079 }
1080 
1081 
1082 void TemplateTable::dstore() {
1083   transition(dtos, vtos);
1084   const Register Rlocal_index = R2_tmp;
1085 
1086   locals_index(Rlocal_index);
1087 
1088 #ifdef __SOFTFP__
1089   store_category2_local(Rlocal_index, R3_tmp);
1090 #else
1091   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1092 #endif // __SOFTFP__
1093 }
1094 
1095 
1096 void TemplateTable::astore() {
1097   transition(vtos, vtos);
1098   const Register Rlocal_index = R1_tmp;
1099 
1100   __ pop_ptr(R0_tos);
1101   locals_index(Rlocal_index);
1102   Address local = load_aaddress(Rlocal_index, Rtemp);
1103   __ str(R0_tos, local);
1104 }
1105 
1106 
1107 void TemplateTable::wide_istore() {
1108   transition(vtos, vtos);
1109   const Register Rlocal_index = R2_tmp;
1110 
1111   __ pop_i(R0_tos);
1112   locals_index_wide(Rlocal_index);
1113   Address local = load_iaddress(Rlocal_index, Rtemp);
1114   __ str_32(R0_tos, local);
1115 }
1116 
1117 
1118 void TemplateTable::wide_lstore() {
1119   transition(vtos, vtos);
1120   const Register Rlocal_index = R2_tmp;
1121   const Register Rlocal_base = R3_tmp;
1122 
1123   __ pop_l(R0_tos_lo, R1_tos_hi);
1124 
1125   locals_index_wide(Rlocal_index);
1126   store_category2_local(Rlocal_index, R3_tmp);
1127 }
1128 
1129 
1130 void TemplateTable::wide_fstore() {
1131   wide_istore();
1132 }
1133 
1134 
1135 void TemplateTable::wide_dstore() {
1136   wide_lstore();
1137 }
1138 
1139 
1140 void TemplateTable::wide_astore() {
1141   transition(vtos, vtos);
1142   const Register Rlocal_index = R2_tmp;
1143 
1144   __ pop_ptr(R0_tos);
1145   locals_index_wide(Rlocal_index);
1146   Address local = load_aaddress(Rlocal_index, Rtemp);
1147   __ str(R0_tos, local);
1148 }
1149 
1150 
1151 void TemplateTable::iastore() {
1152   transition(itos, vtos);
1153   const Register Rindex = R4_tmp; // index_check prefers index in R4
1154   const Register Rarray = R3_tmp;
1155   // R0_tos: value
1156 
1157   __ pop_i(Rindex);
1158   index_check(Rarray, Rindex);
1159   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
1160   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1161 }
1162 
1163 
1164 void TemplateTable::lastore() {
1165   transition(ltos, vtos);
1166   const Register Rindex = R4_tmp; // index_check prefers index in R4
1167   const Register Rarray = R3_tmp;
1168   // R0_tos_lo:R1_tos_hi: value
1169 
1170   __ pop_i(Rindex);
1171   index_check(Rarray, Rindex);
1172 
1173   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
1174   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
1175 }
1176 
1177 
1178 void TemplateTable::fastore() {
1179   transition(ftos, vtos);
1180   const Register Rindex = R4_tmp; // index_check prefers index in R4
1181   const Register Rarray = R3_tmp;
1182   // S0_tos/R0_tos: value
1183 
1184   __ pop_i(Rindex);
1185   index_check(Rarray, Rindex);
1186   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
1187   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
1188 }
1189 
1190 
1191 void TemplateTable::dastore() {
1192   transition(dtos, vtos);
1193   const Register Rindex = R4_tmp; // index_check prefers index in R4
1194   const Register Rarray = R3_tmp;
 1195   // D0_tos / R0_tos_lo:R1_tos_hi: value
1196 
1197   __ pop_i(Rindex);
1198   index_check(Rarray, Rindex);
1199 
1200   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
1201   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
1202 }
1203 
1204 
1205 void TemplateTable::aastore() {
1206   transition(vtos, vtos);
1207   Label is_null, throw_array_store, done;
1208 
1209   const Register Raddr_1   = R1_tmp;
1210   const Register Rvalue_2  = R2_tmp;
1211   const Register Rarray_3  = R3_tmp;
1212   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1213   const Register Rsub_5    = R5_tmp;
1214   const Register Rsuper_LR = LR_tmp;
1215 
1216   // stack: ..., array, index, value
1217   __ ldr(Rvalue_2, at_tos());     // Value
1218   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1219   __ ldr(Rarray_3, at_tos_p2());  // Array
1220 
1221   index_check_without_pop(Rarray_3, Rindex_4);
1222 
1223   // Compute the array base
1224   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1225 
1226   // do array store check - check for NULL value first
1227   __ cbz(Rvalue_2, is_null);
1228 
1229   // Load subklass
1230   __ load_klass(Rsub_5, Rvalue_2);
1231   // Load superklass
1232   __ load_klass(Rtemp, Rarray_3);
1233   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1234 
1235   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1236   // Come here on success
1237 
1238   // Store value
1239   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1240 
1241   // Now store using the appropriate barrier
1242   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
1243   __ b(done);
1244 
1245   __ bind(throw_array_store);
1246 
1247   // Come here on failure of subtype check
1248   __ profile_typecheck_failed(R0_tmp);
1249 
1250   // object is at TOS
1251   __ b(Interpreter::_throw_ArrayStoreException_entry);
1252 
1253   // Have a NULL in Rvalue_2, store NULL at array[index].
1254   __ bind(is_null);
1255   __ profile_null_seen(R0_tmp);
1256 
1257   // Store a NULL
1258   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
1259 
1260   // Pop stack arguments
1261   __ bind(done);
1262   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1263 }
1264 
1265 
1266 void TemplateTable::bastore() {
1267   transition(itos, vtos);
1268   const Register Rindex = R4_tmp; // index_check prefers index in R4
1269   const Register Rarray = R3_tmp;
1270   // R0_tos: value
1271 
1272   __ pop_i(Rindex);
1273   index_check(Rarray, Rindex);
1274 
1275   // Need to check whether array is boolean or byte
1276   // since both types share the bastore bytecode.
1277   __ load_klass(Rtemp, Rarray);
1278   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1279   Label L_skip;
1280   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1281   __ b(L_skip, eq);
1282   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1283   __ bind(L_skip);
1284   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
1285   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1286 }
1287 
1288 
1289 void TemplateTable::castore() {
1290   transition(itos, vtos);
1291   const Register Rindex = R4_tmp; // index_check prefers index in R4
1292   const Register Rarray = R3_tmp;
1293   // R0_tos: value
1294 
1295   __ pop_i(Rindex);
1296   index_check(Rarray, Rindex);
1297   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
1298   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1299 }
1300 
1301 
1302 void TemplateTable::sastore() {
1303   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1304            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1305          "base offsets for char and short should be equal");
1306   castore();
1307 }
1308 
1309 
1310 void TemplateTable::istore(int n) {
1311   transition(itos, vtos);
1312   __ str_32(R0_tos, iaddress(n));
1313 }
1314 
1315 
1316 void TemplateTable::lstore(int n) {
1317   transition(ltos, vtos);
1318   __ str(R0_tos_lo, laddress(n));
1319   __ str(R1_tos_hi, haddress(n));
1320 }
1321 
1322 
1323 void TemplateTable::fstore(int n) {
1324   transition(ftos, vtos);
1325 #ifdef __SOFTFP__
1326   __ str(R0_tos, faddress(n));
1327 #else
1328   __ str_float(S0_tos, faddress(n));
1329 #endif // __SOFTFP__
1330 }
1331 
1332 
1333 void TemplateTable::dstore(int n) {
1334   transition(dtos, vtos);
1335 #ifdef __SOFTFP__
1336   __ str(R0_tos_lo, laddress(n));
1337   __ str(R1_tos_hi, haddress(n));
1338 #else
1339   __ str_double(D0_tos, daddress(n));
1340 #endif // __SOFTFP__
1341 }
1342 
1343 
1344 void TemplateTable::astore(int n) {
1345   transition(vtos, vtos);
1346   __ pop_ptr(R0_tos);
1347   __ str(R0_tos, aaddress(n));
1348 }
1349 
1350 
1351 void TemplateTable::pop() {
1352   transition(vtos, vtos);
1353   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1354 }
1355 
1356 
1357 void TemplateTable::pop2() {
1358   transition(vtos, vtos);
1359   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1360 }
1361 
1362 
1363 void TemplateTable::dup() {
1364   transition(vtos, vtos);
1365   // stack: ..., a
1366   __ load_ptr(0, R0_tmp);
1367   __ push_ptr(R0_tmp);
1368   // stack: ..., a, a
1369 }
1370 
1371 
1372 void TemplateTable::dup_x1() {
1373   transition(vtos, vtos);
1374   // stack: ..., a, b
1375   __ load_ptr(0, R0_tmp);  // load b
1376   __ load_ptr(1, R2_tmp);  // load a
1377   __ store_ptr(1, R0_tmp); // store b
1378   __ store_ptr(0, R2_tmp); // store a
1379   __ push_ptr(R0_tmp);     // push b
1380   // stack: ..., b, a, b
1381 }
1382 
1383 
1384 void TemplateTable::dup_x2() {
1385   transition(vtos, vtos);
1386   // stack: ..., a, b, c
1387   __ load_ptr(0, R0_tmp);   // load c
1388   __ load_ptr(1, R2_tmp);   // load b
1389   __ load_ptr(2, R4_tmp);   // load a
1390 
1391   __ push_ptr(R0_tmp);      // push c
1392 
1393   // stack: ..., a, b, c, c
1394   __ store_ptr(1, R2_tmp);  // store b
1395   __ store_ptr(2, R4_tmp);  // store a
1396   __ store_ptr(3, R0_tmp);  // store c
1397   // stack: ..., c, a, b, c
1398 }
1399 
1400 
1401 void TemplateTable::dup2() {
1402   transition(vtos, vtos);
1403   // stack: ..., a, b
1404   __ load_ptr(1, R0_tmp);  // load a
1405   __ push_ptr(R0_tmp);     // push a
1406   __ load_ptr(1, R0_tmp);  // load b
1407   __ push_ptr(R0_tmp);     // push b
1408   // stack: ..., a, b, a, b
1409 }
1410 
1411 
1412 void TemplateTable::dup2_x1() {
1413   transition(vtos, vtos);
1414 
1415   // stack: ..., a, b, c
1416   __ load_ptr(0, R4_tmp);  // load c
1417   __ load_ptr(1, R2_tmp);  // load b
1418   __ load_ptr(2, R0_tmp);  // load a
1419 
1420   __ push_ptr(R2_tmp);     // push b
1421   __ push_ptr(R4_tmp);     // push c
1422 
1423   // stack: ..., a, b, c, b, c
1424 
1425   __ store_ptr(2, R0_tmp);  // store a
1426   __ store_ptr(3, R4_tmp);  // store c
1427   __ store_ptr(4, R2_tmp);  // store b
1428 
1429   // stack: ..., b, c, a, b, c
1430 }
1431 
1432 
1433 void TemplateTable::dup2_x2() {
1434   transition(vtos, vtos);
1435   // stack: ..., a, b, c, d
1436   __ load_ptr(0, R0_tmp);  // load d
1437   __ load_ptr(1, R2_tmp);  // load c
1438   __ push_ptr(R2_tmp);     // push c
1439   __ push_ptr(R0_tmp);     // push d
1440   // stack: ..., a, b, c, d, c, d
1441   __ load_ptr(4, R4_tmp);  // load b
1442   __ store_ptr(4, R0_tmp); // store d in b
1443   __ store_ptr(2, R4_tmp); // store b in d
1444   // stack: ..., a, d, c, b, c, d
1445   __ load_ptr(5, R4_tmp);  // load a
1446   __ store_ptr(5, R2_tmp); // store c in a
1447   __ store_ptr(3, R4_tmp); // store a in c
1448   // stack: ..., c, d, a, b, c, d
1449 }
1450 
1451 
1452 void TemplateTable::swap() {
1453   transition(vtos, vtos);
1454   // stack: ..., a, b
1455   __ load_ptr(1, R0_tmp);  // load a
1456   __ load_ptr(0, R2_tmp);  // load b
1457   __ store_ptr(0, R0_tmp); // store a in b
1458   __ store_ptr(1, R2_tmp); // store b in a
1459   // stack: ..., b, a
1460 }
1461 
1462 
1463 void TemplateTable::iop2(Operation op) {
1464   transition(itos, itos);
1465   const Register arg1 = R1_tmp;
1466   const Register arg2 = R0_tos;
1467 
1468   __ pop_i(arg1);
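  // for the shift cases only the low five bits of the count are significant,
  // hence the 'andr(arg2, arg2, 0x1f)' masking below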
1469   switch (op) {
1470     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1471     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1472     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1473     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1474     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1475     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1476     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1477     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1478     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1479     default   : ShouldNotReachHere();
1480   }
1481 }
1482 
1483 
1484 void TemplateTable::lop2(Operation op) {
1485   transition(ltos, ltos);
1486   const Register arg1_lo = R2_tmp;
1487   const Register arg1_hi = R3_tmp;
1488   const Register arg2_lo = R0_tos_lo;
1489   const Register arg2_hi = R1_tos_hi;
1490 
1491   __ pop_l(arg1_lo, arg1_hi);
1492   switch (op) {
1493     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1494     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1495     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1496     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1497     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1498     default : ShouldNotReachHere();
1499   }
1500 }
1501 
1502 
1503 void TemplateTable::idiv() {
1504   transition(itos, itos);
1505   __ mov(R2, R0_tos);
1506   __ pop_i(R0);
1507   // R0 - dividend
1508   // R2 - divisor
1509   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1510   // R1 - result
1511   __ mov(R0_tos, R1);
1512 }
1513 
1514 
1515 void TemplateTable::irem() {
1516   transition(itos, itos);
1517   __ mov(R2, R0_tos);
1518   __ pop_i(R0);
1519   // R0 - dividend
1520   // R2 - divisor
1521   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1522   // R0 - remainder
1523 }
1524 
1525 
1526 void TemplateTable::lmul() {
1527   transition(ltos, ltos);
1528   const Register arg1_lo = R0_tos_lo;
1529   const Register arg1_hi = R1_tos_hi;
1530   const Register arg2_lo = R2_tmp;
1531   const Register arg2_hi = R3_tmp;
1532 
1533   __ pop_l(arg2_lo, arg2_hi);
1534 
1535   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1536 }
1537 
1538 
1539 void TemplateTable::ldiv() {
1540   transition(ltos, ltos);
1541   const Register x_lo = R2_tmp;
1542   const Register x_hi = R3_tmp;
1543   const Register y_lo = R0_tos_lo;
1544   const Register y_hi = R1_tos_hi;
1545 
1546   __ pop_l(x_lo, x_hi);
1547 
 1548   // check if y == 0 (orrs sets Z only when both halves are zero)
1549   __ orrs(Rtemp, y_lo, y_hi);
1550   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1551   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1552 }
1553 
1554 
1555 void TemplateTable::lrem() {
1556   transition(ltos, ltos);
1557   const Register x_lo = R2_tmp;
1558   const Register x_hi = R3_tmp;
1559   const Register y_lo = R0_tos_lo;
1560   const Register y_hi = R1_tos_hi;
1561 
1562   __ pop_l(x_lo, x_hi);
1563 
 1564   // check if y == 0 (orrs sets Z only when both halves are zero)
1565   __ orrs(Rtemp, y_lo, y_hi);
1566   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1567   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1568 }
1569 
1570 
1571 void TemplateTable::lshl() {
1572   transition(itos, ltos);
1573   const Register shift_cnt = R4_tmp;
1574   const Register val_lo = R2_tmp;
1575   const Register val_hi = R3_tmp;
1576 
1577   __ pop_l(val_lo, val_hi);
1578   __ andr(shift_cnt, R0_tos, 63);
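  // Java long shifts use only the low six bits of the count, hence the mask
  // with 63 above (same in lshr/lushr below)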
1579   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1580 }
1581 
1582 
1583 void TemplateTable::lshr() {
1584   transition(itos, ltos);
1585   const Register shift_cnt = R4_tmp;
1586   const Register val_lo = R2_tmp;
1587   const Register val_hi = R3_tmp;
1588 
1589   __ pop_l(val_lo, val_hi);
1590   __ andr(shift_cnt, R0_tos, 63);
1591   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1592 }
1593 
1594 
1595 void TemplateTable::lushr() {
1596   transition(itos, ltos);
1597   const Register shift_cnt = R4_tmp;
1598   const Register val_lo = R2_tmp;
1599   const Register val_hi = R3_tmp;
1600 
1601   __ pop_l(val_lo, val_hi);
1602   __ andr(shift_cnt, R0_tos, 63);
1603   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1604 }
1605 
1606 
1607 void TemplateTable::fop2(Operation op) {
1608   transition(ftos, ftos);
1609 #ifdef __SOFTFP__
1610   __ mov(R1, R0_tos);
1611   __ pop_i(R0);
1612   switch (op) {
1613     // __aeabi_XXXX_extlib: Optional wrapper around SoftFloat-3e
1614     // for calculation accuracy improvement. See CR 6757269, JDK-8215902.
1615     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_extlib), R0, R1); break;
1616     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_extlib), R0, R1); break;
1617     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1618     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1619     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1620     default : ShouldNotReachHere();
1621   }
1622 #else
1623   const FloatRegister arg1 = S1_tmp;
1624   const FloatRegister arg2 = S0_tos;
1625 
1626   switch (op) {
1627     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1628     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1629     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1630     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1631     case rem:
1632 #ifndef __ABI_HARD__
1633       __ pop_f(arg1);
1634       __ fmrs(R0, arg1);
1635       __ fmrs(R1, arg2);
1636       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1637       __ fmsr(S0_tos, R0);
1638 #else
1639       __ mov_float(S1_reg, arg2);
1640       __ pop_f(S0);
1641       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1642 #endif // !__ABI_HARD__
1643       break;
1644     default : ShouldNotReachHere();
1645   }
1646 #endif // __SOFTFP__
1647 }
1648 
1649 
1650 void TemplateTable::dop2(Operation op) {
1651   transition(dtos, dtos);
1652 #ifdef __SOFTFP__
1653   __ mov(R2, R0_tos_lo);
1654   __ mov(R3, R1_tos_hi);
1655   __ pop_l(R0, R1);
1656   switch (op) {
1657     // __aeabi_XXXX_extlib: Optional wrapper around SoftFloat-3e
1658     // for calculation accuracy improvement. See CR 6757269, JDK-8215902.
1659     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_extlib), R0, R1, R2, R3); break;
1660     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_extlib), R0, R1, R2, R3); break;
1661     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1662     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1663     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1664     default : ShouldNotReachHere();
1665   }
1666 #else
1667   const FloatRegister arg1 = D1_tmp;
1668   const FloatRegister arg2 = D0_tos;
1669 
1670   switch (op) {
1671     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1672     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1673     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1674     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1675     case rem:
1676 #ifndef __ABI_HARD__
1677       __ pop_d(arg1);
1678       __ fmrrd(R0, R1, arg1);
1679       __ fmrrd(R2, R3, arg2);
1680       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1681       __ fmdrr(D0_tos, R0, R1);
1682 #else
1683       __ mov_double(D1, arg2);
1684       __ pop_d(D0);
1685       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1686 #endif // !__ABI_HARD__
1687       break;
1688     default : ShouldNotReachHere();
1689   }
1690 #endif // __SOFTFP__
1691 }
1692 
1693 
1694 void TemplateTable::ineg() {
1695   transition(itos, itos);
1696   __ neg_32(R0_tos, R0_tos);
1697 }
1698 
1699 
1700 void TemplateTable::lneg() {
1701   transition(ltos, ltos);
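       // 64-bit negation: compute 0 - value; rsbs negates the low word and sets the
       // carry flag, rsc then propagates the borrow into the high word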
1702   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1703   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1704 }
1705 
1706 
1707 void TemplateTable::fneg() {
1708   transition(ftos, ftos);
1709 #ifdef __SOFTFP__
1710   // Invert sign bit
1711   const int sign_mask = 0x80000000;
1712   __ eor(R0_tos, R0_tos, sign_mask);
1713 #else
1714   __ neg_float(S0_tos, S0_tos);
1715 #endif // __SOFTFP__
1716 }
1717 
1718 
1719 void TemplateTable::dneg() {
1720   transition(dtos, dtos);
1721 #ifdef __SOFTFP__
1722   // Invert sign bit in the high part of the double
1723   const int sign_mask_hi = 0x80000000;
1724   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1725 #else
1726   __ neg_double(D0_tos, D0_tos);
1727 #endif // __SOFTFP__
1728 }
1729 
1730 
1731 void TemplateTable::iinc() {
1732   transition(vtos, vtos);
1733   const Register Rconst = R2_tmp;
1734   const Register Rlocal_index = R1_tmp;
1735   const Register Rval = R0_tmp;
1736 
1737   __ ldrsb(Rconst, at_bcp(2));
1738   locals_index(Rlocal_index);
1739   Address local = load_iaddress(Rlocal_index, Rtemp);
1740   __ ldr_s32(Rval, local);
1741   __ add(Rval, Rval, Rconst);
1742   __ str_32(Rval, local);
1743 }
1744 
1745 
1746 void TemplateTable::wide_iinc() {
1747   transition(vtos, vtos);
1748   const Register Rconst = R2_tmp;
1749   const Register Rlocal_index = R1_tmp;
1750   const Register Rval = R0_tmp;
1751 
1752   // get constant in Rconst
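       // the increment is a signed 16-bit value stored big-endian in the bytecode stream:
       // ldrsb sign-extends the high byte, ldrb loads the low byte, orr combines them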
1753   __ ldrsb(R2_tmp, at_bcp(4));
1754   __ ldrb(R3_tmp, at_bcp(5));
1755   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1756 
1757   locals_index_wide(Rlocal_index);
1758   Address local = load_iaddress(Rlocal_index, Rtemp);
1759   __ ldr_s32(Rval, local);
1760   __ add(Rval, Rval, Rconst);
1761   __ str_32(Rval, local);
1762 }
1763 
1764 
1765 void TemplateTable::convert() {
1766   // Checking
1767 #ifdef ASSERT
1768   { TosState tos_in  = ilgl;
1769     TosState tos_out = ilgl;
1770     switch (bytecode()) {
1771       case Bytecodes::_i2l: // fall through
1772       case Bytecodes::_i2f: // fall through
1773       case Bytecodes::_i2d: // fall through
1774       case Bytecodes::_i2b: // fall through
1775       case Bytecodes::_i2c: // fall through
1776       case Bytecodes::_i2s: tos_in = itos; break;
1777       case Bytecodes::_l2i: // fall through
1778       case Bytecodes::_l2f: // fall through
1779       case Bytecodes::_l2d: tos_in = ltos; break;
1780       case Bytecodes::_f2i: // fall through
1781       case Bytecodes::_f2l: // fall through
1782       case Bytecodes::_f2d: tos_in = ftos; break;
1783       case Bytecodes::_d2i: // fall through
1784       case Bytecodes::_d2l: // fall through
1785       case Bytecodes::_d2f: tos_in = dtos; break;
1786       default             : ShouldNotReachHere();
1787     }
1788     switch (bytecode()) {
1789       case Bytecodes::_l2i: // fall through
1790       case Bytecodes::_f2i: // fall through
1791       case Bytecodes::_d2i: // fall through
1792       case Bytecodes::_i2b: // fall through
1793       case Bytecodes::_i2c: // fall through
1794       case Bytecodes::_i2s: tos_out = itos; break;
1795       case Bytecodes::_i2l: // fall through
1796       case Bytecodes::_f2l: // fall through
1797       case Bytecodes::_d2l: tos_out = ltos; break;
1798       case Bytecodes::_i2f: // fall through
1799       case Bytecodes::_l2f: // fall through
1800       case Bytecodes::_d2f: tos_out = ftos; break;
1801       case Bytecodes::_i2d: // fall through
1802       case Bytecodes::_l2d: // fall through
1803       case Bytecodes::_f2d: tos_out = dtos; break;
1804       default             : ShouldNotReachHere();
1805     }
1806     transition(tos_in, tos_out);
1807   }
1808 #endif // ASSERT
1809 
1810   // Conversion
1811   switch (bytecode()) {
1812     case Bytecodes::_i2l:
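           // sign-extend the int: the high word is the sign bit replicated
           // (arithmetic shift right by BitsPerWord-1 = 31)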
1813       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1814       break;
1815 
1816     case Bytecodes::_i2f:
1817 #ifdef __SOFTFP__
1818       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1819 #else
1820       __ fmsr(S0_tmp, R0_tos);
1821       __ fsitos(S0_tos, S0_tmp);
1822 #endif // __SOFTFP__
1823       break;
1824 
1825     case Bytecodes::_i2d:
1826 #ifdef __SOFTFP__
1827       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1828 #else
1829       __ fmsr(S0_tmp, R0_tos);
1830       __ fsitod(D0_tos, S0_tmp);
1831 #endif // __SOFTFP__
1832       break;
1833 
1834     case Bytecodes::_i2b:
1835       __ sign_extend(R0_tos, R0_tos, 8);
1836       break;
1837 
1838     case Bytecodes::_i2c:
1839       __ zero_extend(R0_tos, R0_tos, 16);
1840       break;
1841 
1842     case Bytecodes::_i2s:
1843       __ sign_extend(R0_tos, R0_tos, 16);
1844       break;
1845 
1846     case Bytecodes::_l2i:
1847       /* nothing to do */
1848       break;
1849 
1850     case Bytecodes::_l2f:
1851       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1852 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1853       __ fmsr(S0_tos, R0);
1854 #endif // !__SOFTFP__ && !__ABI_HARD__
1855       break;
1856 
1857     case Bytecodes::_l2d:
1858       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1859 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1860       __ fmdrr(D0_tos, R0, R1);
1861 #endif // !__SOFTFP__ && !__ABI_HARD__
1862       break;
1863 
1864     case Bytecodes::_f2i:
1865 #ifndef __SOFTFP__
1866       __ ftosizs(S0_tos, S0_tos);
1867       __ fmrs(R0_tos, S0_tos);
1868 #else
1869       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1870 #endif // !__SOFTFP__
1871       break;
1872 
1873     case Bytecodes::_f2l:
1874 #ifndef __SOFTFP__
1875       __ fmrs(R0_tos, S0_tos);
1876 #endif // !__SOFTFP__
1877       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1878       break;
1879 
1880     case Bytecodes::_f2d:
1881 #ifdef __SOFTFP__
1882       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1883 #else
1884       __ convert_f2d(D0_tos, S0_tos);
1885 #endif // __SOFTFP__
1886       break;
1887 
1888     case Bytecodes::_d2i:
1889 #ifndef __SOFTFP__
1890       __ ftosizd(Stemp, D0);
1891       __ fmrs(R0, Stemp);
1892 #else
1893       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1894 #endif // !__SOFTFP__
1895       break;
1896 
1897     case Bytecodes::_d2l:
1898 #ifndef __SOFTFP__
1899       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1900 #endif // !__SOFTFP__
1901       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1902       break;
1903 
1904     case Bytecodes::_d2f:
1905 #ifdef __SOFTFP__
1906       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1907 #else
1908       __ convert_d2f(S0_tos, D0_tos);
1909 #endif // __SOFTFP__
1910       break;
1911 
1912     default:
1913       ShouldNotReachHere();
1914   }
1915 }
1916 
1917 
1918 void TemplateTable::lcmp() {
1919   transition(ltos, itos);
1920   const Register arg1_lo = R2_tmp;
1921   const Register arg1_hi = R3_tmp;
1922   const Register arg2_lo = R0_tos_lo;
1923   const Register arg2_hi = R1_tos_hi;
1924   const Register res = R4_tmp;
1925 
1926   __ pop_l(arg1_lo, arg1_hi);
1927 
1928   // long compare arg1 with arg2
1929   // result is -1/0/+1 if '<'/'='/'>'
1930   Label done;
1931 
1932   __ mov (res, 0);
1933   __ cmp (arg1_hi, arg2_hi);
1934   __ mvn (res, 0, lt);
1935   __ mov (res, 1, gt);
1936   __ b(done, ne);
1937   __ cmp (arg1_lo, arg2_lo);
1938   __ mvn (res, 0, lo);
1939   __ mov (res, 1, hi);
1940   __ bind(done);
1941   __ mov (R0_tos, res);
1942 }
1943 
1944 
1945 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1946   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
1947 
1948 
1949 #ifdef __SOFTFP__
1950 
1951   if (is_float) {
1952     transition(ftos, itos);
1953     const Register Rx = R0;
1954     const Register Ry = R1;
1955 
1956     __ mov(Ry, R0_tos);
1957     __ pop_i(Rx);
1958 
1959     if (unordered_result == 1) {
1960       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
1961     } else {
1962       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
1963     }
1964 
1965   } else {
1966 
1967     transition(dtos, itos);
1968     const Register Rx_lo = R0;
1969     const Register Rx_hi = R1;
1970     const Register Ry_lo = R2;
1971     const Register Ry_hi = R3;
1972 
1973     __ mov(Ry_lo, R0_tos_lo);
1974     __ mov(Ry_hi, R1_tos_hi);
1975     __ pop_l(Rx_lo, Rx_hi);
1976 
1977     if (unordered_result == 1) {
1978       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1979     } else {
1980       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1981     }
1982   }
1983 
1984 #else
1985 
1986   if (is_float) {
1987     transition(ftos, itos);
1988     __ pop_f(S1_tmp);
1989     __ fcmps(S1_tmp, S0_tos);
1990   } else {
1991     transition(dtos, itos);
1992     __ pop_d(D1_tmp);
1993     __ fcmpd(D1_tmp, D0_tos);
1994   }
1995 
1996   __ fmstat();
1997 
1998   // comparison result | flag N | flag Z | flag C | flag V
1999   // "<"               |   1    |   0    |   0    |   0
2000   // "=="              |   0    |   1    |   1    |   0
2001   // ">"               |   0    |   0    |   1    |   0
2002   // unordered         |   0    |   0    |   1    |   1
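       // 'lt' (N != V) is true for 'less' and 'unordered'; 'mi' (N set) is true only for 'less'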
2003 
2004   if (unordered_result < 0) {
2005     __ mov(R0_tos, 1);           // result ==  1 if greater
2006     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2007   } else {
2008     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2009     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2010   }
2011   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2012 #endif // __SOFTFP__
2013 }
2014 
2015 
2016 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2017 
2018   const Register Rdisp = R0_tmp;
2019   const Register Rbumped_taken_count = R5_tmp;
2020 
2021   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2022 
2023   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2024                              InvocationCounter::counter_offset();
2025   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2026                               InvocationCounter::counter_offset();
2027   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2028 
2029   // Load up R0 with the branch displacement
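       // the offset is stored big-endian in the bytecode stream; the first (most significant)
       // byte is loaded with ldrsb so the assembled displacement is correctly sign-extended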
2030   if (is_wide) {
2031     __ ldrsb(R0_tmp, at_bcp(1));
2032     __ ldrb(R1_tmp, at_bcp(2));
2033     __ ldrb(R2_tmp, at_bcp(3));
2034     __ ldrb(R3_tmp, at_bcp(4));
2035     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2036     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2037     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2038   } else {
2039     __ ldrsb(R0_tmp, at_bcp(1));
2040     __ ldrb(R1_tmp, at_bcp(2));
2041     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2042   }
2043 
2044   // Handle all the JSR stuff here, then exit.
2045   // It's much shorter and cleaner than intermingling with the
2046   // non-JSR normal-branch stuff occurring below.
2047   if (is_jsr) {
2048     // compute return address as bci in R1
2049     const Register Rret_addr = R1_tmp;
2050     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2051 
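         // return bci = (Rbcp + bytecode length) - bytecode base, where the bytecode base
         // is ConstMethod* + codes_offset and the length is 3 (5 for the wide jsr_w)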
2052     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2053     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2054     __ sub(Rret_addr, Rret_addr, Rtemp);
2055 
2056     // Load the next target bytecode into R3_bytecode and advance Rbcp
2057     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2058 
2059     // Push return address
2060     __ push_i(Rret_addr);
2061     // jsr returns vtos
2062     __ dispatch_only_noverify(vtos);
2063     return;
2064   }
2065 
2066   // Normal (non-jsr) branch handling
2067 
2068   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2069   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2070 
2071   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2072   Label backedge_counter_overflow;
2073   Label profile_method;
2074   Label dispatch;
2075 
2076   if (UseLoopCounter) {
2077     // increment backedge counter for backward branches
2078     // Rdisp (R0): target offset
2079 
2080     const Register Rcnt = R2_tmp;
2081     const Register Rcounters = R1_tmp;
2082 
2083     // count only if backward branch
2084     __ tst(Rdisp, Rdisp);
2085     __ b(dispatch, pl);
2086 
2087     if (TieredCompilation) {
2088       Label no_mdo;
2089       int increment = InvocationCounter::count_increment;
2090       if (ProfileInterpreter) {
2091         // Are we profiling?
2092         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2093         __ cbz(Rtemp, no_mdo);
2094         // Increment the MDO backedge counter
2095         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2096                                                   in_bytes(InvocationCounter::counter_offset()));
2097         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2098         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2099                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2100         __ b(dispatch);
2101       }
2102       __ bind(no_mdo);
2103       // Increment backedge counter in MethodCounters*
2104       // Note: Rbumped_taken_count is a callee-saved register on ARM32
2105       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2106                              Rdisp, R3_bytecode,
2107                              noreg);
2108       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2109       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2110                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2111     } else {
2112       // Increment backedge counter in MethodCounters*
2113       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2114                              Rdisp, R3_bytecode,
2115                              noreg);
2116       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2117       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2118       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2119 
2120       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2121       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2122       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2123 
2124       if (ProfileInterpreter) {
2125         // Test to see if we should create a method data oop
2126         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2127         __ ldr_s32(Rtemp, profile_limit);
2128         __ cmp_32(Rcnt, Rtemp);
2129         __ b(dispatch, lt);
2130 
2131         // if no method data exists, go to profile method
2132         __ test_method_data_pointer(R4_tmp, profile_method);
2133 
2134         if (UseOnStackReplacement) {
2135           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2136           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2137           __ ldr_s32(Rtemp, backward_branch_limit);
2138           __ cmp(Rbumped_taken_count, Rtemp);
2139           __ b(dispatch, lo);
2140 
2141           // When ProfileInterpreter is on, the backedge_count comes from the
2142           // MethodData*, whose value does not get reset on the call to
2143           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2144           // routine while the method is being compiled, add a second test to make
2145           // sure the overflow function is called only once every overflow_frequency.
2146           const int overflow_frequency = 1024;
2147 
2148           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2149           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2150           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2151 
2152           __ b(backedge_counter_overflow, eq);
2153         }
2154       } else {
2155         if (UseOnStackReplacement) {
2156           // check for overflow against Rcnt, which is the sum of the counters
2157           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2158           __ ldr_s32(Rtemp, backward_branch_limit);
2159           __ cmp_32(Rcnt, Rtemp);
2160           __ b(backedge_counter_overflow, hs);
2161 
2162         }
2163       }
2164     }
2165     __ bind(dispatch);
2166   }
2167 
2168   if (!UseOnStackReplacement) {
2169     __ bind(backedge_counter_overflow);
2170   }
2171 
2172   // continue with the bytecode @ target
2173   __ dispatch_only(vtos);
2174 
2175   if (UseLoopCounter) {
2176     if (ProfileInterpreter) {
2177       // Out-of-line code to allocate method data oop.
2178       __ bind(profile_method);
2179 
2180       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2181       __ set_method_data_pointer_for_bcp();
2182       // reload next bytecode
2183       __ ldrb(R3_bytecode, Address(Rbcp));
2184       __ b(dispatch);
2185     }
2186 
2187     if (UseOnStackReplacement) {
2188       // invocation counter overflow
2189       __ bind(backedge_counter_overflow);
2190 
2191       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2192       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2193 
2194       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2195       const Register Rnmethod = R0;
2196 
2197       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2198 
2199       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2200 
2201       // nmethod may have been invalidated (VM may block upon call_VM return)
2202       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2203       __ cmp(R1_tmp, nmethod::in_use);
2204       __ b(dispatch, ne);
2205 
2206       // We have the address of an on stack replacement routine in Rnmethod,
2207       // We need to prepare to execute the OSR method. First we must
2208       // migrate the locals and monitors off of the stack.
2209 
2210       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2211 
2212       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2213 
2214       // R0 is OSR buffer
2215 
2216       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2217       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
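           // restore the caller's FP/LR, cut SP back to the aligned sender SP, and
           // jump to the OSR entry point with the OSR buffer still in R0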
2218 
2219       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2220       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2221 
2222       __ jump(R1_tmp);
2223     }
2224   }
2225 }
2226 
2227 
2228 void TemplateTable::if_0cmp(Condition cc) {
2229   transition(itos, vtos);
2230   // assume branch is more often taken than not (loops use backward branches)
2231   Label not_taken;
2232   __ cmp_32(R0_tos, 0);
2233   __ b(not_taken, convNegCond(cc));
2234   branch(false, false);
2235   __ bind(not_taken);
2236   __ profile_not_taken_branch(R0_tmp);
2237 }
2238 
2239 
2240 void TemplateTable::if_icmp(Condition cc) {
2241   transition(itos, vtos);
2242   // assume branch is more often taken than not (loops use backward branches)
2243   Label not_taken;
2244   __ pop_i(R1_tmp);
2245   __ cmp_32(R1_tmp, R0_tos);
2246   __ b(not_taken, convNegCond(cc));
2247   branch(false, false);
2248   __ bind(not_taken);
2249   __ profile_not_taken_branch(R0_tmp);
2250 }
2251 
2252 
2253 void TemplateTable::if_nullcmp(Condition cc) {
2254   transition(atos, vtos);
2255   assert(cc == equal || cc == not_equal, "invalid condition");
2256 
2257   // assume branch is more often taken than not (loops use backward branches)
2258   Label not_taken;
2259   if (cc == equal) {
2260     __ cbnz(R0_tos, not_taken);
2261   } else {
2262     __ cbz(R0_tos, not_taken);
2263   }
2264   branch(false, false);
2265   __ bind(not_taken);
2266   __ profile_not_taken_branch(R0_tmp);
2267 }
2268 
2269 
2270 void TemplateTable::if_acmp(Condition cc) {
2271   transition(atos, vtos);
2272   // assume branch is more often taken than not (loops use backward branches)
2273   Label not_taken;
2274   __ pop_ptr(R1_tmp);
2275   __ cmpoop(R1_tmp, R0_tos);
2276   __ b(not_taken, convNegCond(cc));
2277   branch(false, false);
2278   __ bind(not_taken);
2279   __ profile_not_taken_branch(R0_tmp);
2280 }
2281 
2282 
2283 void TemplateTable::ret() {
2284   transition(vtos, vtos);
2285   const Register Rlocal_index = R1_tmp;
2286   const Register Rret_bci = Rtmp_save0; // R4/R19
2287 
2288   locals_index(Rlocal_index);
2289   Address local = load_iaddress(Rlocal_index, Rtemp);
2290   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2291   __ profile_ret(Rtmp_save1, Rret_bci);
2292   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2293   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2294   __ add(Rbcp, Rtemp, Rret_bci);
2295   __ dispatch_next(vtos);
2296 }
2297 
2298 
2299 void TemplateTable::wide_ret() {
2300   transition(vtos, vtos);
2301   const Register Rlocal_index = R1_tmp;
2302   const Register Rret_bci = Rtmp_save0; // R4/R19
2303 
2304   locals_index_wide(Rlocal_index);
2305   Address local = load_iaddress(Rlocal_index, Rtemp);
2306   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2307   __ profile_ret(Rtmp_save1, Rret_bci);
2308   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2309   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2310   __ add(Rbcp, Rtemp, Rret_bci);
2311   __ dispatch_next(vtos);
2312 }
2313 
2314 
2315 void TemplateTable::tableswitch() {
2316   transition(itos, vtos);
2317 
2318   const Register Rindex  = R0_tos;
2319   const Register Rtemp2  = R1_tmp;
2320   const Register Rabcp   = R2_tmp;  // aligned bcp
2321   const Register Rlow    = R3_tmp;
2322   const Register Rhigh   = R4_tmp;
2323   const Register Roffset = R5_tmp;
2324 
2325   // align bcp
2326   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2327   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2328 
2329   // load lo & hi
2330   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2331   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2332   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
2333 
2334   // compare index with high bound
2335   __ cmp_32(Rhigh, Rindex);
2336 
2337 
2338   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2339   __ subs(Rindex, Rindex, Rlow, ge);
2340 
2341   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2342   // ("ge" status accumulated from cmp and subs instructions) then load
2343   // offset from table, otherwise load offset for default case
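       // tableswitch layout after the opcode and 0-3 pad bytes: default, low, high (4 bytes
       // each), then (high - low + 1) jump offsets; Rabcp was aligned past the default word
       // and the ldmia writeback advanced it past low and high, so the default offset is
       // at Rabcp - 3*BytesPerInt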
2344 
2345   if(ProfileInterpreter) {
2346     Label default_case, continue_execution;
2347 
2348     __ b(default_case, lt);
2349     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2350     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2351     __ b(continue_execution);
2352 
2353     __ bind(default_case);
2354     __ profile_switch_default(R0_tmp);
2355     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2356 
2357     __ bind(continue_execution);
2358   } else {
2359     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2360     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2361   }
2362 
2363   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2364 
2365   // load the next bytecode to R3_bytecode and advance Rbcp
2366   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2367   __ dispatch_only(vtos);
2368 
2369 }
2370 
2371 
2372 void TemplateTable::lookupswitch() {
2373   transition(itos, itos);
2374   __ stop("lookupswitch bytecode should have been rewritten");
2375 }
2376 
2377 
2378 void TemplateTable::fast_linearswitch() {
2379   transition(itos, vtos);
2380   Label loop, found, default_case, continue_execution;
2381 
2382   const Register Rkey     = R0_tos;
2383   const Register Rabcp    = R2_tmp;  // aligned bcp
2384   const Register Rdefault = R3_tmp;
2385   const Register Rcount   = R4_tmp;
2386   const Register Roffset  = R5_tmp;
2387 
2388   // bswap Rkey, so we can avoid bswapping the table entries
2389   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2390 
2391   // align bcp
2392   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2393   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2394 
2395   // load default & counter
2396   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2397   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
2398 
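       // lookupswitch layout after the opcode and padding: default, npairs (4 bytes each),
       // then npairs of (match, offset) pairs; after the ldmia writeback Rabcp points at the
       // first match word and each post-indexed load below advances it by a whole pair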
2399   __ cmp_32(Rcount, 0);
2400   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2401   __ b(default_case, eq);
2402 
2403   // table search
2404   __ bind(loop);
2405   __ cmp_32(Rtemp, Rkey);
2406   __ b(found, eq);
2407   __ subs(Rcount, Rcount, 1);
2408   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2409   __ b(loop, ne);
2410 
2411   // default case
2412   __ bind(default_case);
2413   __ profile_switch_default(R0_tmp);
2414   __ mov(Roffset, Rdefault);
2415   __ b(continue_execution);
2416 
2417   // entry found -> get offset
2418   __ bind(found);
2419   // Rabcp is already incremented and points to the next entry
2420   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2421   if (ProfileInterpreter) {
2422     // Calculate index of the selected case.
2423     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2424 
2425     // align bcp
2426     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2427     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2428 
2429     // load number of cases
2430     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2431     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2432 
2433     // Selected index = <number of cases> - <current loop count>
2434     __ sub(R1_tmp, R2_tmp, Rcount);
2435     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2436   }
2437 
2438   // continue execution
2439   __ bind(continue_execution);
2440   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2441 
2442   // load the next bytecode to R3_bytecode and advance Rbcp
2443   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2444   __ dispatch_only(vtos);
2445 }
2446 
2447 
2448 void TemplateTable::fast_binaryswitch() {
2449   transition(itos, vtos);
2450   // Implementation using the following core algorithm:
2451   //
2452   // int binary_search(int key, LookupswitchPair* array, int n) {
2453   //   // Binary search according to "Methodik des Programmierens" by
2454   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2455   //   int i = 0;
2456   //   int j = n;
2457   //   while (i+1 < j) {
2458   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2459   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2460   //     // where a stands for the array and assuming that the (nonexistent)
2461   //     // element a[n] is infinitely big.
2462   //     int h = (i + j) >> 1;
2463   //     // i < h < j
2464   //     if (key < array[h].fast_match()) {
2465   //       j = h;
2466   //     } else {
2467   //       i = h;
2468   //     }
2469   //   }
2470   //   // R: a[i] <= key < a[i+1] or Q
2471   //   // (i.e., if key is within array, i is the correct index)
2472   //   return i;
2473   // }
2474 
2475   // register allocation
2476   const Register key    = R0_tos;                // already set (tosca)
2477   const Register array  = R1_tmp;
2478   const Register i      = R2_tmp;
2479   const Register j      = R3_tmp;
2480   const Register h      = R4_tmp;
2481   const Register val    = R5_tmp;
2482   const Register temp1  = Rtemp;
2483   const Register temp2  = LR_tmp;
2484   const Register offset = R3_tmp;
2485 
2486   // set 'array' = aligned bcp + 2 ints
2487   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2488   __ align_reg(array, temp1, BytesPerInt);
2489 
2490   // initialize i & j
2491   __ mov(i, 0);                                  // i = 0;
2492   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2493   // Convert j into native byte ordering
2494   __ byteswap_u32(j, temp1, temp2);
2495 
2496   // and start
2497   Label entry;
2498   __ b(entry);
2499 
2500   // binary search loop
2501   { Label loop;
2502     __ bind(loop);
2503     // int h = (i + j) >> 1;
2504     __ add(h, i, j);                             // h = i + j;
2505     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2506     // if (key < array[h].fast_match()) {
2507     //   j = h;
2508     // } else {
2509     //   i = h;
2510     // }
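         // each LookupswitchPair holds two ints (match, offset), hence the index scaling by 1 + LogBytesPerInt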
2511     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2512     // Convert array[h].match to native byte-ordering before compare
2513     __ byteswap_u32(val, temp1, temp2);
2514     __ cmp_32(key, val);
2515     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2516     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2517     // while (i+1 < j)
2518     __ bind(entry);
2519     __ add(temp1, i, 1);                             // i+1
2520     __ cmp(temp1, j);                                // i+1 < j
2521     __ b(loop, lt);
2522   }
2523 
2524   // end of binary search, result index is i (must check again!)
2525   Label default_case;
2526   // Convert array[i].match to native byte-ordering before compare
2527   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2528   __ byteswap_u32(val, temp1, temp2);
2529   __ cmp_32(key, val);
2530   __ b(default_case, ne);
2531 
2532   // entry found
2533   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2534   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2535   __ profile_switch_case(R0, i, R1, i);
2536   __ byteswap_u32(offset, temp1, temp2);
2537   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2538   __ dispatch_only(vtos);
2539 
2540   // default case
2541   __ bind(default_case);
2542   __ profile_switch_default(R0);
2543   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2544   __ byteswap_u32(offset, temp1, temp2);
2545   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2546   __ dispatch_only(vtos);
2547 }
2548 
2549 
2550 void TemplateTable::_return(TosState state) {
2551   transition(state, state);
2552   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2553 
2554   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2555     Label skip_register_finalizer;
2556     assert(state == vtos, "only valid state");
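         // load the receiver (local 0) and, if its class has a finalizer, register it with the VM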
2557     __ ldr(R1, aaddress(0));
2558     __ load_klass(Rtemp, R1);
2559     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2560     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2561 
2562     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2563 
2564     __ bind(skip_register_finalizer);
2565   }
2566 
2567   // Narrow result if state is itos but result type is smaller.
2568   // Need to narrow in the return bytecode rather than in generate_return_entry
2569   // since compiled code callers expect the result to already be narrowed.
2570   if (state == itos) {
2571     __ narrow(R0_tos);
2572   }
2573   __ remove_activation(state, LR);
2574 
2575   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2576 
2577   // According to interpreter calling conventions, result is returned in R0/R1,
2578   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2579   // This conversion should be done after remove_activation, as it uses
2580   // push(state) & pop(state) to preserve return value.
2581   __ convert_tos_to_retval(state);
2582 
2583   __ ret();
2584 
2585   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2586   __ nop();
2587 }
2588 
2589 
2590 // ----------------------------------------------------------------------------
2591 // Volatile variables demand their effects be made known to all CPU's in
2592 // order.  Store buffers on most chips allow reads & writes to reorder; the
2593 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2594 // memory barrier (i.e., it's not sufficient that the interpreter does not
2595 // reorder volatile references, the hardware also must not reorder them).
2596 //
2597 // According to the new Java Memory Model (JMM):
2598 // (1) All volatiles are serialized wrt to each other.
2599 // ALSO reads & writes act as acquire & release, so:
2600 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2601 // the read float up to before the read.  It's OK for non-volatile memory refs
2602 // that happen before the volatile read to float down below it.
2603 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2604 // that happen BEFORE the write float down to after the write.  It's OK for
2605 // non-volatile memory refs that happen after the volatile write to float up
2606 // before it.
2607 //
2608 // We only put in barriers around volatile refs (they are expensive), not
2609 // _between_ memory refs (that would require us to track the flavor of the
2610 // previous memory refs).  Requirements (2) and (3) require some barriers
2611 // before volatile stores and after volatile loads.  These nearly cover
2612 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2613 // case is placed after volatile-stores although it could just as well go
2614 // before volatile-loads.
2615 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2616                                      Register tmp,
2617                                      bool preserve_flags,
2618                                      Register load_tgt) {
2619   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2620 }
2621 
2622 // Blows all volatile registers: R0-R3, Rtemp, LR.
2623 void TemplateTable::resolve_cache_and_index(int byte_no,
2624                                             Register Rcache,
2625                                             Register Rindex,
2626                                             size_t index_size) {
2627   assert_different_registers(Rcache, Rindex, Rtemp);
2628 
2629   Label resolved;
2630   Bytecodes::Code code = bytecode();
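       // the nofast_ variants resolve exactly like the bytecodes they stand for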
2631   switch (code) {
2632   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2633   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2634   default: break;
2635   }
2636 
2637   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2638   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2639   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2640   __ b(resolved, eq);
2641 
2642   // resolve first time through
2643   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2644   __ mov(R1, code);
2645   __ call_VM(noreg, entry, R1);
2646   // Update registers with resolved info
2647   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2648   __ bind(resolved);
2649 }
2650 
2651 
2652 // The Rcache and Rindex registers must be set before the call
2653 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2654                                               Register Rindex,
2655                                               Register Roffset,
2656                                               Register Rflags,
2657                                               Register Robj,
2658                                               bool is_static = false) {
2659 
2660   assert_different_registers(Rcache, Rindex, Rtemp);
2661   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2662 
2663   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2664 
2665   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2666 
2667   // Field offset
2668   __ ldr(Roffset, Address(Rtemp,
2669            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2670 
2671   // Flags
2672   __ ldr_u32(Rflags, Address(Rtemp,
2673            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2674 
2675   if (is_static) {
2676     __ ldr(Robj, Address(Rtemp,
2677              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2678     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2679     __ ldr(Robj, Address(Robj, mirror_offset));
2680     __ resolve_oop_handle(Robj);
2681   }
2682 }
2683 
2684 
2685 // Blows all volatile registers: R0-R3, Rtemp, LR.
2686 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2687                                                Register method,
2688                                                Register itable_index,
2689                                                Register flags,
2690                                                bool is_invokevirtual,
2691                                                bool is_invokevfinal/*unused*/,
2692                                                bool is_invokedynamic) {
2693   // setup registers
2694   const Register cache = R2_tmp;
2695   const Register index = R3_tmp;
2696   const Register temp_reg = Rtemp;
2697   assert_different_registers(cache, index, temp_reg);
2698   assert_different_registers(method, itable_index, temp_reg);
2699 
2700   // determine constant pool cache field offsets
2701   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2702   const int method_offset = in_bytes(
2703     ConstantPoolCache::base_offset() +
2704       ((byte_no == f2_byte)
2705        ? ConstantPoolCacheEntry::f2_offset()
2706        : ConstantPoolCacheEntry::f1_offset()
2707       )
2708     );
2709   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2710                                     ConstantPoolCacheEntry::flags_offset());
2711   // access constant pool cache fields
2712   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2713                                     ConstantPoolCacheEntry::f2_offset());
2714 
2715   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2716   resolve_cache_and_index(byte_no, cache, index, index_size);
2717   __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2718   __ ldr(method, Address(temp_reg, method_offset));
2719 
2720   if (itable_index != noreg) {
2721     __ ldr(itable_index, Address(temp_reg, index_offset));
2722   }
2723   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2724 }
2725 
2726 
2727 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
2728 // Blows volatile registers R0-R3, Rtemp, LR,
2729 // except cache and index registers which are preserved.
2730 void TemplateTable::jvmti_post_field_access(Register Rcache,
2731                                             Register Rindex,
2732                                             bool is_static,
2733                                             bool has_tos) {
2734   assert_different_registers(Rcache, Rindex, Rtemp);
2735 
2736   if (__ can_post_field_access()) {
2737     // Check to see if a field access watch has been set before we take
2738     // the time to call into the VM.
2739 
2740     Label Lcontinue;
2741 
2742     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2743     __ cbz(Rtemp, Lcontinue);
2744 
2745     // cache entry pointer
2746     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2747     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2748     if (is_static) {
2749       __ mov(R1, 0);        // NULL object reference
2750     } else {
2751       __ pop(atos);         // Get the object
2752       __ mov(R1, R0_tos);
2753       __ verify_oop(R1);
2754       __ push(atos);        // Restore stack state
2755     }
2756     // R1: object pointer or NULL
2757     // R2: cache entry pointer
2758     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2759                R1, R2);
2760     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
2761 
2762     __ bind(Lcontinue);
2763   }
2764 }
2765 
2766 
2767 void TemplateTable::pop_and_check_object(Register r) {
2768   __ pop_ptr(r);
2769   __ null_check(r, Rtemp);  // for field access must check obj.
2770   __ verify_oop(r);
2771 }
2772 
2773 
2774 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2775   transition(vtos, vtos);
2776 
2777   const Register Roffset  = R2_tmp;
2778   const Register Robj     = R3_tmp;
2779   const Register Rcache   = R4_tmp;
2780   const Register Rflagsav = Rtmp_save0;  // R4/R19
2781   const Register Rindex   = R5_tmp;
2782   const Register Rflags   = R5_tmp;
2783 
2784   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
2785   jvmti_post_field_access(Rcache, Rindex, is_static, false);
2786   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
2787 
2788   __ mov(Rflagsav, Rflags);
2789 
2790   if (!is_static) pop_and_check_object(Robj);
2791 
2792   Label Done, Lint, Ltable, shouldNotReachHere;
2793   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
2794 
2795   // compute type
2796   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
2797   // Make sure we don't need to mask flags after the above shift
2798   ConstantPoolCacheEntry::verify_tos_state_shift();
2799 
2800   // There are actually two implementations of getfield/getstatic:
2801   //
2802   // 1) Table switch using add(PC,...) instruction (fast_version)
2803   // 2) Table switch using ldr(PC,...) instruction
2804   //
2805   // The first version requires a fixed-size code block for each case
2806   // and cannot be used when RewriteBytecodes or VerifyOops
2807   // is enabled.
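       // In the fast version each case below is padded (FixedSizeCodeBlock) to exactly
       // max_block_size instructions, so the computed add(PC, ...) jump lands at the start
       // of the selected case; the slow version instead loads PC from a table of case addresses.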
2808 
2809   // Size of fixed size code block for fast_version
2810   const int log_max_block_size = 3;
2811   const int max_block_size = 1 << log_max_block_size;
2812 
2813   // Decide if fast version is enabled
2814   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
2815 
2816   // On 32-bit ARM, the atos and itos cases can be merged only in the fast version,
2817   // because atos requires additional processing in the slow version.
2818   bool atos_merged_with_itos = fast_version;
2819 
2820   assert(number_of_states == 10, "number of tos states should be equal to 10");
2821 
2822   __ cmp(Rflags, itos);
2823   if(atos_merged_with_itos) {
2824     __ cmp(Rflags, atos, ne);
2825   }
2826 
2827   // table switch by type
2828   if(fast_version) {
2829     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
2830   } else {
2831     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
2832   }
2833 
2834   // jump to itos/atos case
2835   __ b(Lint);
2836 
2837   // table with addresses for slow version
2838   if (fast_version) {
2839     // nothing to do
2840   } else  {
2841     __ bind(Ltable);
2842     __ emit_address(Lbtos);
2843     __ emit_address(Lztos);
2844     __ emit_address(Lctos);
2845     __ emit_address(Lstos);
2846     __ emit_address(Litos);
2847     __ emit_address(Lltos);
2848     __ emit_address(Lftos);
2849     __ emit_address(Ldtos);
2850     __ emit_address(Latos);
2851   }
2852 
2853 #ifdef ASSERT
2854   int seq = 0;
2855 #endif
2856   // btos
2857   {
2858     assert(btos == seq++, "btos has unexpected value");
2859     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
2860     __ bind(Lbtos);
2861     __ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2862     __ push(btos);
2863     // Rewrite bytecode to be faster
2864     if (!is_static && rc == may_rewrite) {
2865       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2866     }
2867     __ b(Done);
2868   }
2869 
2870   // ztos (same as btos for getfield)
2871   {
2872     assert(ztos == seq++, "ztos has unexpected value");
2873     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
2874     __ bind(Lztos);
2875     __ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2876     __ push(ztos);
2877     // Rewrite bytecode to be faster (use btos fast getfield)
2878     if (!is_static && rc == may_rewrite) {
2879       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2880     }
2881     __ b(Done);
2882   }
2883 
2884   // ctos
2885   {
2886     assert(ctos == seq++, "ctos has unexpected value");
2887     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
2888     __ bind(Lctos);
2889     __ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2890     __ push(ctos);
2891     if (!is_static && rc == may_rewrite) {
2892       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
2893     }
2894     __ b(Done);
2895   }
2896 
2897   // stos
2898   {
2899     assert(stos == seq++, "stos has unexpected value");
2900     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
2901     __ bind(Lstos);
2902     __ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2903     __ push(stos);
2904     if (!is_static && rc == may_rewrite) {
2905       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
2906     }
2907     __ b(Done);
2908   }
2909 
2910   // itos
2911   {
2912     assert(itos == seq++, "itos has unexpected value");
2913     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
2914     __ bind(Litos);
2915     __ b(shouldNotReachHere);
2916   }
2917 
2918   // ltos
2919   {
2920     assert(ltos == seq++, "ltos has unexpected value");
2921     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
2922     __ bind(Lltos);
2923     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2924     __ push(ltos);
2925     if (!is_static && rc == may_rewrite) {
2926       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
2927     }
2928     __ b(Done);
2929   }
2930 
2931   // ftos
2932   {
2933     assert(ftos == seq++, "ftos has unexpected value");
2934     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
2935     __ bind(Lftos);
2936     // floats and ints are placed on stack in the same way, so
2937     // we can use push(itos) to transfer value without using VFP
2938     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2939     __ push(itos);
2940     if (!is_static && rc == may_rewrite) {
2941       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
2942     }
2943     __ b(Done);
2944   }
2945 
2946   // dtos
2947   {
2948     assert(dtos == seq++, "dtos has unexpected value");
2949     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
2950     __ bind(Ldtos);
2951     // doubles and longs are placed on stack in the same way, so
2952     // we can use push(ltos) to transfer value without using VFP
2953     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2954     __ push(ltos);
2955     if (!is_static && rc == may_rewrite) {
2956       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
2957     }
2958     __ b(Done);
2959   }
2960 
2961   // atos
2962   {
2963     assert(atos == seq++, "atos has unexpected value");
2964 
2965     // atos case for slow version on 32-bit ARM
2966     if(!atos_merged_with_itos) {
2967       __ bind(Latos);
2968       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
2969       __ push(atos);
2970       // Rewrite bytecode to be faster
2971       if (!is_static && rc == may_rewrite) {
2972         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
2973       }
2974       __ b(Done);
2975     }
2976   }
2977 
2978   assert(vtos == seq++, "vtos has unexpected value");
2979 
2980   __ bind(shouldNotReachHere);
2981   __ should_not_reach_here();
2982 
2983   // itos and atos cases are frequent so it makes sense to move them out of table switch
2984   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
2985 
2986   __ bind(Lint);
2987   __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2988   __ push(itos);
2989   // Rewrite bytecode to be faster
2990   if (!is_static && rc == may_rewrite) {
2991     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
2992   }
2993 
2994   __ bind(Done);
2995 
2996   // Check for volatile field
2997   Label notVolatile;
2998   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2999 
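       // acquire semantics: a LoadLoad|LoadStore barrier after the volatile load keeps
       // subsequent memory accesses from floating above it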
3000   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3001 
3002   __ bind(notVolatile);
3003 }
3004 
3005 void TemplateTable::getfield(int byte_no) {
3006   getfield_or_static(byte_no, false);
3007 }
3008 
3009 void TemplateTable::nofast_getfield(int byte_no) {
3010   getfield_or_static(byte_no, false, may_not_rewrite);
3011 }
3012 
3013 void TemplateTable::getstatic(int byte_no) {
3014   getfield_or_static(byte_no, true);
3015 }
3016 
3017 
3018 // The cache and index registers are expected to be set before the call, and should not be R1 or Rtemp.
3019 // Blows volatile registers R0-R3, Rtemp, LR,
3020 // except cache and index registers which are preserved.
3021 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3022   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3023   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3024 
3025   if (__ can_post_field_modification()) {
3026     // Check to see if a field modification watch has been set before we take
3027     // the time to call into the VM.
3028     Label Lcontinue;
3029 
3030     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3031     __ cbz(Rtemp, Lcontinue);
3032 
3033     if (is_static) {
3034       // Life is simple.  Null out the object pointer.
3035       __ mov(R1, 0);
3036     } else {
3037       // Life is harder. The stack holds the value on top, followed by the object.
3038       // We don't know the size of the value, though; it could be one or two words
3039       // depending on its type. As a result, we must find the type to determine where
3040       // the object is.
3041 
3042       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3043       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3044 
3045       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3046       // Make sure we don't need to mask Rtemp after the above shift
3047       ConstantPoolCacheEntry::verify_tos_state_shift();
3048 
3049       __ cmp(Rtemp, ltos);
3050       __ cond_cmp(Rtemp, dtos, ne);
3051       // two word value (ltos/dtos)
3052       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3053 
3054       // one word value (not ltos, dtos)
3055       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3056     }
3057 
3058     // cache entry pointer
3059     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3060     __ add(R2, R2, in_bytes(cp_base_offset));
3061 
3062     // object (tos)
3063     __ mov(R3, Rstack_top);
3064 
3065     // R1: object pointer set up above (NULL if static)
3066     // R2: cache entry pointer
3067     // R3: value object on the stack
3068     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3069                R1, R2, R3);
3070     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3071 
3072     __ bind(Lcontinue);
3073   }
3074 }
3075 
3076 
3077 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3078   transition(vtos, vtos);
3079 
3080   const Register Roffset  = R2_tmp;
3081   const Register Robj     = R3_tmp;
3082   const Register Rcache   = R4_tmp;
3083   const Register Rflagsav = Rtmp_save0;  // R4/R19
3084   const Register Rindex   = R5_tmp;
3085   const Register Rflags   = R5_tmp;
3086 
3087   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3088   jvmti_post_field_mod(Rcache, Rindex, is_static);
3089   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3090 
3091   // Check for volatile field
3092   Label notVolatile;
3093   __ mov(Rflagsav, Rflags);
3094   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3095 
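       // release semantics: a StoreStore|LoadStore barrier before the volatile store keeps
       // earlier memory accesses from floating below it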
3096   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3097 
3098   __ bind(notVolatile);
3099 
3100   Label Done, Lint, shouldNotReachHere;
3101   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3102 
3103   // compute type
3104   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3105   // Make sure we don't need to mask flags after the above shift
3106   ConstantPoolCacheEntry::verify_tos_state_shift();
3107 
3108   // There are actually two implementations of putfield/putstatic:
3109   //
3110   // 32-bit ARM:
3111   // 1) Table switch using add(PC,...) instruction (fast_version)
3112   // 2) Table switch using ldr(PC,...) instruction
3113   //
3114   // The first version requires a fixed-size code block for each case
3115   // and cannot be used when RewriteBytecodes or VerifyOops
3116   // is enabled.
3117 
3118   // Size of fixed size code block for fast_version (in instructions)
3119   const int log_max_block_size = 3;
3120   const int max_block_size = 1 << log_max_block_size;
3121 
3122   // Decide if fast version is enabled
3123   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
3124 
3125   assert(number_of_states == 10, "number of tos states should be equal to 10");
3126 
3127   // itos case is frequent and is moved outside table switch
3128   __ cmp(Rflags, itos);
3129 
3130   // table switch by type
3131   if (fast_version) {
3132     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3133   } else  {
3134     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3135   }
3136 
3137   // jump to itos case
3138   __ b(Lint);
3139 
3140   // table with addresses for slow version
3141   if (fast_version) {
3142     // nothing to do
3143   } else  {
3144     __ bind(Ltable);
3145     __ emit_address(Lbtos);
3146     __ emit_address(Lztos);
3147     __ emit_address(Lctos);
3148     __ emit_address(Lstos);
3149     __ emit_address(Litos);
3150     __ emit_address(Lltos);
3151     __ emit_address(Lftos);
3152     __ emit_address(Ldtos);
3153     __ emit_address(Latos);
3154   }
3155 
3156 #ifdef ASSERT
3157   int seq = 0;
3158 #endif
3159   // btos
3160   {
3161     assert(btos == seq++, "btos has unexpected value");
3162     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3163     __ bind(Lbtos);
3164     __ pop(btos);
3165     if (!is_static) pop_and_check_object(Robj);
3166     __ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3167     if (!is_static && rc == may_rewrite) {
3168       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3169     }
3170     __ b(Done);
3171   }
3172 
3173   // ztos
3174   {
3175     assert(ztos == seq++, "ztos has unexpected value");
3176     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3177     __ bind(Lztos);
3178     __ pop(ztos);
3179     if (!is_static) pop_and_check_object(Robj);
3180     __ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3181     if (!is_static && rc == may_rewrite) {
3182       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3183     }
3184     __ b(Done);
3185   }
3186 
3187   // ctos
3188   {
3189     assert(ctos == seq++, "ctos has unexpected value");
3190     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3191     __ bind(Lctos);
3192     __ pop(ctos);
3193     if (!is_static) pop_and_check_object(Robj);
3194     __ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3195     if (!is_static && rc == may_rewrite) {
3196       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3197     }
3198     __ b(Done);
3199   }
3200 
3201   // stos
3202   {
3203     assert(stos == seq++, "stos has unexpected value");
3204     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3205     __ bind(Lstos);
3206     __ pop(stos);
3207     if (!is_static) pop_and_check_object(Robj);
3208     __ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3209     if (!is_static && rc == may_rewrite) {
3210       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3211     }
3212     __ b(Done);
3213   }
3214 
3215   // itos
3216   {
3217     assert(itos == seq++, "itos has unexpected value");
3218     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3219     __ bind(Litos);
3220     __ b(shouldNotReachHere);
3221   }
3222 
3223   // ltos
3224   {
3225     assert(ltos == seq++, "ltos has unexpected value");
3226     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3227     __ bind(Lltos);
3228     __ pop(ltos);
3229     if (!is_static) pop_and_check_object(Robj);
3230     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3231     if (!is_static && rc == may_rewrite) {
3232       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3233     }
3234     __ b(Done);
3235   }
3236 
3237   // ftos
3238   {
3239     assert(ftos == seq++, "ftos has unexpected value");
3240     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3241     __ bind(Lftos);
3242     // floats and ints are placed on stack in the same way, so
3243     // we can use pop(itos) to transfer value without using VFP
3244     __ pop(itos);
3245     if (!is_static) pop_and_check_object(Robj);
3246     __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3247     if (!is_static && rc == may_rewrite) {
3248       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3249     }
3250     __ b(Done);
3251   }
3252 
3253   // dtos
3254   {
3255     assert(dtos == seq++, "dtos has unexpected value");
3256     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3257     __ bind(Ldtos);
3258     // doubles and longs are placed on stack in the same way, so
3259     // we can use pop(ltos) to transfer value without using VFP
3260     __ pop(ltos);
3261     if (!is_static) pop_and_check_object(Robj);
3262     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3263     if (!is_static && rc == may_rewrite) {
3264       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3265     }
3266     __ b(Done);
3267   }
3268 
3269   // atos
3270   {
3271     assert(atos == seq++, "atos has unexpected value");
3272     __ bind(Latos);
3273     __ pop(atos);
3274     if (!is_static) pop_and_check_object(Robj);
3275     // Store into the field
3276     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3277     if (!is_static && rc == may_rewrite) {
3278       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3279     }
3280     __ b(Done);
3281   }
3282 
3283   __ bind(shouldNotReachHere);
3284   __ should_not_reach_here();
3285 
3286   // itos case is frequent and is moved outside table switch
3287   __ bind(Lint);
3288   __ pop(itos);
3289   if (!is_static) pop_and_check_object(Robj);
3290   __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3291   if (!is_static && rc == may_rewrite) {
3292     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3293   }
3294 
3295   __ bind(Done);
3296 
3297   Label notVolatile2;
3298   if (is_static) {
3299     // Just check for volatile. Memory barrier for static final field
3300     // is handled by class initialization.
3301     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3302     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3303     __ bind(notVolatile2);
3304   } else {
3305     // Check for volatile field and final field
3306     Label skipMembar;
3307 
3308     __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3309            1 << ConstantPoolCacheEntry::is_final_shift);
3310     __ b(skipMembar, eq);
3311 
3312     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3313 
3314     // StoreLoad barrier after volatile field write
3315     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3316     __ b(skipMembar);
3317 
3318     // StoreStore barrier after final field write
3319     __ bind(notVolatile2);
3320     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3321 
3322     __ bind(skipMembar);
3323   }
3324 }
3325 
3326 void TemplateTable::putfield(int byte_no) {
3327   putfield_or_static(byte_no, false);
3328 }
3329 
3330 void TemplateTable::nofast_putfield(int byte_no) {
3331   putfield_or_static(byte_no, false, may_not_rewrite);
3332 }
3333 
3334 void TemplateTable::putstatic(int byte_no) {
3335   putfield_or_static(byte_no, true);
3336 }
3337 
3338 
3339 void TemplateTable::jvmti_post_fast_field_mod() {
3340   // This version of jvmti_post_fast_field_mod() is not used on ARM
3341   Unimplemented();
3342 }
3343 
3344 // Blows volatile registers R0-R3, Rtemp, LR,
3345 // but preserves tosca with the given state.
3346 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3347   if (__ can_post_field_modification()) {
3348     // Check to see if a field modification watch has been set before we take
3349     // the time to call into the VM.
3350     Label done;
3351 
3352     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3353     __ cbz(R2, done);
3354 
3355     __ pop_ptr(R3);               // copy the object pointer from tos
3356     __ verify_oop(R3);
3357     __ push_ptr(R3);              // put the object pointer back on tos
3358 
3359     __ push(state);               // save value on the stack
3360 
3361     // access constant pool cache entry
3362     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3363 
3364     __ mov(R1, R3);
3365     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3366     __ mov(R3, Rstack_top); // put tos addr into R3
3367 
3368     // R1: object pointer copied above
3369     // R2: cache entry pointer
3370     // R3: jvalue object on the stack
3371     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3372 
3373     __ pop(state);                // restore value
3374 
3375     __ bind(done);
3376   }
3377 }
3378 
3379 
3380 void TemplateTable::fast_storefield(TosState state) {
3381   transition(state, vtos);
3382 
3383   ByteSize base = ConstantPoolCache::base_offset();
3384 
3385   jvmti_post_fast_field_mod(state);
3386 
3387   const Register Rcache  = R2_tmp;
3388   const Register Rindex  = R3_tmp;
3389   const Register Roffset = R3_tmp;
3390   const Register Rflags  = Rtmp_save0; // R4/R19
3391   const Register Robj    = R5_tmp;
3392 
3393   // access constant pool cache
3394   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3395 
3396   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3397 
3398   // load flags to test volatile
3399   __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3400 
3401   // replace index with field offset from cache entry
3402   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3403 
3404   // Check for volatile store
3405   Label notVolatile;
3406   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3407 
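       // Release barrier (StoreStore|LoadStore) before the volatile store; the matching
       // trailing barrier is emitted after the store below.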
3408   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3409 
3410   __ bind(notVolatile);
3411 
3412   // Get object from stack
3413   pop_and_check_object(Robj);
3414 
3415   Address addr = Address(Robj, Roffset);
3416   // access field
3417   switch (bytecode()) {
3418     case Bytecodes::_fast_zputfield:
3419       __ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3420       break;
3421     case Bytecodes::_fast_bputfield:
3422       __ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3423       break;
3424     case Bytecodes::_fast_sputfield:
3425       __ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3426       break;
3427     case Bytecodes::_fast_cputfield:
3428       __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3429       break;
3430     case Bytecodes::_fast_iputfield:
3431       __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3432       break;
3433     case Bytecodes::_fast_lputfield:
3434       __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3435       break;
3436     case Bytecodes::_fast_fputfield:
3437       __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3438       break;
3439     case Bytecodes::_fast_dputfield:
3440       __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3441       break;
3442     case Bytecodes::_fast_aputfield:
3443       do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3444       break;
3445 
3446     default:
3447       ShouldNotReachHere();
3448   }
3449 
3450   Label notVolatile2;
3451   Label skipMembar;
3452   __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3453          1 << ConstantPoolCacheEntry::is_final_shift);
3454   __ b(skipMembar, eq);
3455 
3456   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3457 
3458   // StoreLoad barrier after volatile field write
3459   volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3460   __ b(skipMembar);
3461 
3462   // StoreStore barrier after final field write
3463   __ bind(notVolatile2);
3464   volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3465 
3466   __ bind(skipMembar);
3467 }
3468 
3469 void TemplateTable::fast_accessfield(TosState state) {
3470   transition(atos, state);
3471 
3472   // do the JVMTI work here to avoid disturbing the register state below
3473   if (__ can_post_field_access()) {
3474     // Check to see if a field access watch has been set before we take
3475     // the time to call into the VM.
3476     Label done;
3477     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3478     __ cbz(R2, done);
3479     // access constant pool cache entry
3480     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3481     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3482     __ verify_oop(R0_tos);
3483     __ mov(R1, R0_tos);
3484     // R1: object pointer copied above
3485     // R2: cache entry pointer
3486     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3487     __ pop_ptr(R0_tos);   // restore object pointer
3488 
3489     __ bind(done);
3490   }
3491 
3492   const Register Robj    = R0_tos;
3493   const Register Rcache  = R2_tmp;
3494   const Register Rflags  = R2_tmp;
3495   const Register Rindex  = R3_tmp;
3496   const Register Roffset = R3_tmp;
3497 
3498   // access constant pool cache
3499   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3500   // replace index with field offset from cache entry
3501   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3502   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3503 
3504   // load flags to test volatile
3505   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3506 
3507   __ verify_oop(Robj);
3508   __ null_check(Robj, Rtemp);
3509 
3510   Address addr = Address(Robj, Roffset);
3511   // access field
3512   switch (bytecode()) {
3513     case Bytecodes::_fast_bgetfield:
3514       __ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3515       break;
3516     case Bytecodes::_fast_sgetfield:
3517       __ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3518       break;
3519     case Bytecodes::_fast_cgetfield:
3520       __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3521       break;
3522     case Bytecodes::_fast_igetfield:
3523       __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3524       break;
3525     case Bytecodes::_fast_lgetfield:
3526       __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3527       break;
3528     case Bytecodes::_fast_fgetfield:
3529       __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3530       break;
3531     case Bytecodes::_fast_dgetfield:
3532       __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3533       break;
3534     case Bytecodes::_fast_agetfield:
3535       do_oop_load(_masm, R0_tos, addr);
3536       __ verify_oop(R0_tos);
3537       break;
3538     default:
3539       ShouldNotReachHere();
3540   }
3541 
3542   // Check for volatile load
3543   Label notVolatile;
3544   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3545 
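       // Acquire barrier (LoadLoad|LoadStore) after the volatile load: later loads and
       // stores may not be reordered above it.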
3546   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3547 
3548   __ bind(notVolatile);
3549 }
3550 
3551 
3552 void TemplateTable::fast_xaccess(TosState state) {
3553   transition(vtos, state);
3554 
3555   const Register Robj = R1_tmp;
3556   const Register Rcache = R2_tmp;
3557   const Register Rindex = R3_tmp;
3558   const Register Roffset = R3_tmp;
3559   const Register Rflags = R4_tmp;
3560   Label done;
3561 
3562   // get receiver
3563   __ ldr(Robj, aaddress(0));
3564 
3565   // access constant pool cache
3566   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3567   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3568   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3569 
3570   // load flags to test volatile
3571   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3572 
3573   // make sure exception is reported in correct bcp range (getfield is next instruction)
3574   __ add(Rbcp, Rbcp, 1);
3575   __ null_check(Robj, Rtemp);
3576   __ sub(Rbcp, Rbcp, 1);
3577 
3578 
3579   if (state == itos) {
3580     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3581   } else if (state == atos) {
3582     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3583     __ verify_oop(R0_tos);
3584   } else if (state == ftos) {
3585 #ifdef __SOFTFP__
3586     __ ldr(R0_tos, Address(Robj, Roffset));
3587 #else
3588     __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg);
3589 #endif // __SOFTFP__
3590   } else {
3591     ShouldNotReachHere();
3592   }
3593 
3594   // Check for volatile load
3595   Label notVolatile;
3596   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3597 
3598   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3599 
3600   __ bind(notVolatile);
3601 
3602   __ bind(done);
3603 }
3604 
3605 
3606 
3607 //----------------------------------------------------------------------------------------------------
3608 // Calls
3609 
3610 void TemplateTable::count_calls(Register method, Register temp) {
3611   // implemented elsewhere
3612   ShouldNotReachHere();
3613 }
3614 
3615 
3616 void TemplateTable::prepare_invoke(int byte_no,
3617                                    Register method,  // linked method (or i-klass)
3618                                    Register index,   // itable index, MethodType, etc.
3619                                    Register recv,    // if caller wants to see it
3620                                    Register flags    // if caller wants to test it
3621                                    ) {
3622   // determine flags
3623   const Bytecodes::Code code = bytecode();
3624   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3625   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3626   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3627   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3628   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3629   const bool load_receiver       = (recv != noreg);
3630   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3631   assert(recv  == noreg || recv  == R2, "");
3632   assert(flags == noreg || flags == R3, "");
3633 
3634   // setup registers & access constant pool cache
3635   if (recv  == noreg)  recv  = R2;
3636   if (flags == noreg)  flags = R3;
3637   const Register temp = Rtemp;
3638   const Register ret_type = R1_tmp;
3639   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
3640 
3641   // save 'interpreter return address'
3642   __ save_bcp();
3643 
3644   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3645 
3646   // maybe push extra argument
3647   if (is_invokedynamic || is_invokehandle) {
3648     Label L_no_push;
3649     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3650     __ mov(temp, index);
3651     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3652     __ load_resolved_reference_at_index(index, temp);
3653     __ verify_oop(index);
3654     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
3655     __ bind(L_no_push);
3656   }
3657 
3658   // load receiver if needed (after extra argument is pushed so parameter size is correct)
3659   if (load_receiver) {
3660     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
3661     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
3662     __ ldr(recv, recv_addr);
3663     __ verify_oop(recv);
3664   }
3665 
3666   // compute return type
3667   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
3668   // Make sure we don't need to mask flags after the above shift
3669   ConstantPoolCacheEntry::verify_tos_state_shift();
3670   // load return address
3671   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
3672     __ mov_slow(temp, table);
3673     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
3674   }
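       // LR now holds the return entry that matches the callee's result tos state, so
       // the interpreter restores the correct tos on return from the call.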
3675 }
3676 
3677 
3678 void TemplateTable::invokevirtual_helper(Register index,
3679                                          Register recv,
3680                                          Register flags) {
3681 
3682   const Register recv_klass = R2_tmp;
3683 
3684   assert_different_registers(index, recv, flags, Rtemp);
3685   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
3686 
3687   // Test for an invoke of a final method
3688   Label notFinal;
3689   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3690 
3691   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
3692 
3693   // do the call - the index is actually the method to call
3694 
3695   // It's final, need a null check here!
3696   __ null_check(recv, Rtemp);
3697 
3698   // profile this call
3699   __ profile_final_call(R0_tmp);
3700 
3701   __ jump_from_interpreted(Rmethod);
3702 
3703   __ bind(notFinal);
3704 
3705   // get receiver klass
3706   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
3707   __ load_klass(recv_klass, recv);
3708 
3709   // profile this call
3710   __ profile_virtual_call(R0_tmp, recv_klass);
3711 
3712   // get target Method* & entry point
3713   const int base = in_bytes(Klass::vtable_start_offset());
3714   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
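       // vtable entries are one word each (asserted above), so the entry is at
       // recv_klass + vtable_start_offset() + index * wordSize, and Rmethod is loaded
       // from its method slot.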
3715   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
3716   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
3717   __ jump_from_interpreted(Rmethod);
3718 }
3719 
3720 void TemplateTable::invokevirtual(int byte_no) {
3721   transition(vtos, vtos);
3722   assert(byte_no == f2_byte, "use this argument");
3723 
3724   const Register Rrecv  = R2_tmp;
3725   const Register Rflags = R3_tmp;
3726 
3727   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
3728 
3729   // Rmethod: index
3730   // Rrecv:   receiver
3731   // Rflags:  flags
3732   // LR:      return address
3733 
3734   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3735 }
3736 
3737 
3738 void TemplateTable::invokespecial(int byte_no) {
3739   transition(vtos, vtos);
3740   assert(byte_no == f1_byte, "use this argument");
3741   const Register Rrecv  = R2_tmp;
3742   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
3743   __ verify_oop(Rrecv);
3744   __ null_check(Rrecv, Rtemp);
3745   // do the call
3746   __ profile_call(Rrecv);
3747   __ jump_from_interpreted(Rmethod);
3748 }
3749 
3750 
3751 void TemplateTable::invokestatic(int byte_no) {
3752   transition(vtos, vtos);
3753   assert(byte_no == f1_byte, "use this argument");
3754   prepare_invoke(byte_no, Rmethod);
3755   // do the call
3756   __ profile_call(R2_tmp);
3757   __ jump_from_interpreted(Rmethod);
3758 }
3759 
3760 
3761 void TemplateTable::fast_invokevfinal(int byte_no) {
3762   transition(vtos, vtos);
3763   assert(byte_no == f2_byte, "use this argument");
3764   __ stop("fast_invokevfinal is not used on ARM");
3765 }
3766 
3767 
3768 void TemplateTable::invokeinterface(int byte_no) {
3769   transition(vtos, vtos);
3770   assert(byte_no == f1_byte, "use this argument");
3771 
3772   const Register Ritable = R1_tmp;
3773   const Register Rrecv   = R2_tmp;
3774   const Register Rinterf = R5_tmp;
3775   const Register Rindex  = R4_tmp;
3776   const Register Rflags  = R3_tmp;
3777   const Register Rklass  = R2_tmp; // Note! Same register with Rrecv
3778 
3779   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
3780 
3781   // First check for Object case, then private interface method,
3782   // then regular interface method.
3783 
3784   // Special case of invokeinterface called for virtual method of
3785   // java.lang.Object.  See cpCache.cpp for details.
3786   Label notObjectMethod;
3787   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3788   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3789   __ bind(notObjectMethod);
3790 
3791   // Get receiver klass into Rklass - also a null check
3792   __ load_klass(Rklass, Rrecv);
3793 
3794   // Check for private method invocation - indicated by vfinal
3795   Label no_such_interface;
3796 
3797   Label notVFinal;
3798   __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3799 
3800   Label subtype;
3801   __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
3802   // If we get here the typecheck failed
3803   __ b(no_such_interface);
3804   __ bind(subtype);
3805 
3806   // do the call
3807   __ profile_final_call(R0_tmp);
3808   __ jump_from_interpreted(Rmethod);
3809 
3810   __ bind(notVFinal);
3811 
3812   // Receiver subtype check against REFC.
3813   __ lookup_interface_method(// inputs: rec. class, interface
3814                              Rklass, Rinterf, noreg,
3815                              // outputs:  scan temp. reg1, scan temp. reg2
3816                              noreg, Ritable, Rtemp,
3817                              no_such_interface);
3818 
3819   // profile this call
3820   __ profile_virtual_call(R0_tmp, Rklass);
3821 
3822   // Get declaring interface class from method
3823   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
3824   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
3825   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
3826 
3827   // Get itable index from method
3828   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
3829   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
3830   __ neg(Rindex, Rtemp);
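       // net effect: Rindex = Method::itable_index_max - Rtemp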
3831 
3832   __ lookup_interface_method(// inputs: rec. class, interface
3833                              Rklass, Rinterf, Rindex,
3834                              // outputs:  scan temp. reg1, scan temp. reg2
3835                              Rmethod, Ritable, Rtemp,
3836                              no_such_interface);
3837 
3838   // Rmethod: Method* to call
3839 
3840   // Check for abstract method error
3841   // Note: This should be done more efficiently via a throw_abstract_method_error
3842   //       interpreter entry point and a conditional jump to it in case of a null
3843   //       method.
3844   { Label L;
3845     __ cbnz(Rmethod, L);
3846     // throw exception
3847     // note: must restore interpreter registers to canonical
3848     //       state for exception handling to work correctly!
3849     __ restore_method();
3850     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3851     // the call_VM checks for exception, so we should never return here.
3852     __ should_not_reach_here();
3853     __ bind(L);
3854   }
3855 
3856   // do the call
3857   __ jump_from_interpreted(Rmethod);
3858 
3859   // throw exception
3860   __ bind(no_such_interface);
3861   __ restore_method();
3862   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3863   // the call_VM checks for exception, so we should never return here.
3864   __ should_not_reach_here();
3865 }
3866 
3867 void TemplateTable::invokehandle(int byte_no) {
3868   transition(vtos, vtos);
3869 
3870   const Register Rrecv  = R2_tmp;
3871   const Register Rmtype = R4_tmp;
3872   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3873 
3874   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
3875   __ null_check(Rrecv, Rtemp);
3876 
3877   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
3878   // R5_method: MH.invokeExact_MT method (from f2)
3879 
3880   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
3881 
3882   // do the call
3883   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
3884   __ mov(Rmethod, R5_method);
3885   __ jump_from_interpreted(Rmethod);
3886 }
3887 
3888 void TemplateTable::invokedynamic(int byte_no) {
3889   transition(vtos, vtos);
3890 
3891   const Register Rcallsite = R4_tmp;
3892   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3893 
3894   prepare_invoke(byte_no, R5_method, Rcallsite);
3895 
3896   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
3897   // R5_method: MH.linkToCallSite method (from f2)
3898 
3899   // Note:  Rcallsite is already pushed by prepare_invoke
3900 
3901   if (ProfileInterpreter) {
3902     __ profile_call(R2_tmp);
3903   }
3904 
3905   // do the call
3906   __ mov(Rmethod, R5_method);
3907   __ jump_from_interpreted(Rmethod);
3908 }
3909 
3910 //----------------------------------------------------------------------------------------------------
3911 // Allocation
3912 
3913 void TemplateTable::_new() {
3914   transition(vtos, atos);
3915 
3916   const Register Robj   = R0_tos;
3917   const Register Rcpool = R1_tmp;
3918   const Register Rindex = R2_tmp;
3919   const Register Rtags  = R3_tmp;
3920   const Register Rsize  = R3_tmp;
3921 
3922   Register Rklass = R4_tmp;
3923   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
3924   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
3925 
3926   Label slow_case;
3927   Label done;
3928   Label initialize_header;
3929   Label initialize_object;  // including clearing the fields
3930 
3931   const bool allow_shared_alloc =
3932     Universe::heap()->supports_inline_contig_alloc();
3933 
3934   // Literals
3935   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
3936 
3937   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
3938   __ get_cpool_and_tags(Rcpool, Rtags);
3939 
3940   // Make sure the class we're about to instantiate has been resolved.
3941   // This is done before loading the InstanceKlass to be consistent with the order
3942   // in which the constant pool is updated (see ConstantPool::klass_at_put)
3943   const int tags_offset = Array<u1>::base_offset_in_bytes();
3944   __ add(Rtemp, Rtags, Rindex);
3945 
3946   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
3947 
3948   // use Rklass as a scratch
3949   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
3950 
3951   // get InstanceKlass
3952   __ cmp(Rtemp, JVM_CONSTANT_Class);
3953   __ b(slow_case, ne);
3954   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
3955 
3956   // make sure klass is initialized & doesn't have finalizer
3957   // make sure klass is fully initialized
3958   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
3959   __ cmp(Rtemp, InstanceKlass::fully_initialized);
3960   __ b(slow_case, ne);
3961 
3962   // get instance_size in InstanceKlass (scaled to a count of bytes)
3963   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
3964 
3965   // test to see if it has a finalizer or is malformed in some way
3966   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
3967   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3968 
3969   // Allocate the instance:
3970   //  If TLAB is enabled:
3971   //    Try to allocate in the TLAB.
3972   //    If fails, go to the slow path.
3973   //  Else If inline contiguous allocations are enabled:
3974   //    Try to allocate in eden.
3975   //    If fails due to heap end, go to slow path.
3976   //
3977   //  If TLAB is enabled OR inline contiguous is enabled:
3978   //    Initialize the allocation.
3979   //    Exit.
3980   //
3981   //  Go to slow path.
3982   if (UseTLAB) {
3983     const Register Rtlab_top = R1_tmp;
3984     const Register Rtlab_end = R2_tmp;
3985     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
3986 
3987     __ tlab_allocate(Robj, Rtlab_top, Rtlab_end, Rsize, slow_case);
3988     if (ZeroTLAB) {
3989       // the fields have been already cleared
3990       __ b(initialize_header);
3991     } else {
3992       // initialize both the header and fields
3993       __ b(initialize_object);
3994     }
3995   } else {
3996     // Allocation in the shared Eden, if allowed.
3997     if (allow_shared_alloc) {
3998       const Register Rheap_top_addr = R2_tmp;
3999       const Register Rheap_top = R5_tmp;
4000       const Register Rheap_end = Rtemp;
4001       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
4002 
4003       __ eden_allocate(Robj, Rheap_top, Rheap_top_addr, Rheap_end, Rsize, slow_case);
4004     }
4005   }
4006 
4007   if (UseTLAB || allow_shared_alloc) {
4008     const Register Rzero0 = R1_tmp;
4009     const Register Rzero1 = R2_tmp;
4010     const Register Rzero_end = R5_tmp;
4011     const Register Rzero_cur = Rtemp;
4012     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4013 
4014     // The object is initialized before the header.  If the object size is
4015     // zero, go directly to the header initialization.
4016     __ bind(initialize_object);
4017     __ subs(Rsize, Rsize, sizeof(oopDesc));
4018     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4019     __ b(initialize_header, eq);
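         // Rsize is now the payload size in bytes (instance size minus the header) and
         // Rzero_cur points to the first field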
4020 
4021 #ifdef ASSERT
4022     // make sure Rsize is a multiple of 8
4023     Label L;
4024     __ tst(Rsize, 0x07);
4025     __ b(L, eq);
4026     __ stop("object size is not multiple of 8 - adjust this code");
4027     __ bind(L);
4028 #endif
4029 
4030     __ mov(Rzero0, 0);
4031     __ mov(Rzero1, 0);
4032     __ add(Rzero_end, Rzero_cur, Rsize);
4033 
4034     // initialize remaining object fields: Rsize was a multiple of 8
4035     { Label loop;
4036       // loop is unrolled 2 times
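           // each stmia stores two zero words (8 bytes) and post-increments Rzero_cur;
           // the second copy and its compare are conditional (ne) so the loop stops
           // exactly at Rzero_end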
4037       __ bind(loop);
4038       // #1
4039       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4040       __ cmp(Rzero_cur, Rzero_end);
4041       // #2
4042       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4043       __ cmp(Rzero_cur, Rzero_end, ne);
4044       __ b(loop, ne);
4045     }
4046 
4047     // initialize object header only.
4048     __ bind(initialize_header);
4049     if (UseBiasedLocking) {
4050       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4051     } else {
4052       __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
4053     }
4054     // mark
4055     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4056 
4057     // klass
4058     __ store_klass(Rklass, Robj); // blows Rklass:
4059     Rklass = noreg;
4060 
4061     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4062     if (DTraceAllocProbes) {
4063       // Trigger dtrace event for fastpath
4064       Label Lcontinue;
4065 
4066       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4067       __ cbz(Rtemp, Lcontinue);
4068 
4069       __ push(atos);
4070       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4071       __ pop(atos);
4072 
4073       __ bind(Lcontinue);
4074     }
4075 
4076     __ b(done);
4077   } else {
4078     // jump over literals
4079     __ b(slow_case);
4080   }
4081 
4082   if (allow_shared_alloc) {
4083     __ bind_literal(Lheap_top_addr);
4084   }
4085 
4086   // slow case
4087   __ bind(slow_case);
4088   __ get_constant_pool(Rcpool);
4089   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4090   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4091 
4092   // continue
4093   __ bind(done);
4094 
4095   // StoreStore barrier required after complete initialization
4096   // (headers + content zeroing), before the object may escape.
4097   __ membar(MacroAssembler::StoreStore, R1_tmp);
4098 }
4099 
4100 
4101 void TemplateTable::newarray() {
4102   transition(itos, atos);
4103   __ ldrb(R1, at_bcp(1));
4104   __ mov(R2, R0_tos);
4105   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4106   // MacroAssembler::StoreStore barrier not needed here (included in the runtime exit path)
4107 }
4108 
4109 
4110 void TemplateTable::anewarray() {
4111   transition(itos, atos);
4112   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4113   __ get_constant_pool(R1);
4114   __ mov(R3, R0_tos);
4115   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4116   // MacroAssembler::StoreStore barrier not needed here (included in the runtime exit path)
4117 }
4118 
4119 
4120 void TemplateTable::arraylength() {
4121   transition(atos, itos);
4122   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4123   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4124 }
4125 
4126 
4127 void TemplateTable::checkcast() {
4128   transition(atos, atos);
4129   Label done, is_null, quicked, resolved, throw_exception;
4130 
4131   const Register Robj = R0_tos;
4132   const Register Rcpool = R2_tmp;
4133   const Register Rtags = R3_tmp;
4134   const Register Rindex = R4_tmp;
4135   const Register Rsuper = R3_tmp;
4136   const Register Rsub   = R4_tmp;
4137   const Register Rsubtype_check_tmp1 = R1_tmp;
4138   const Register Rsubtype_check_tmp2 = LR_tmp;
4139 
4140   __ cbz(Robj, is_null);
4141 
4142   // Get cpool & tags index
4143   __ get_cpool_and_tags(Rcpool, Rtags);
4144   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4145 
4146   // See if bytecode has already been quicked
4147   __ add(Rtemp, Rtags, Rindex);
4148   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4149 
4150   __ cmp(Rtemp, JVM_CONSTANT_Class);
4151 
4152   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4153 
4154   __ b(quicked, eq);
4155 
4156   __ push(atos);
4157   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4158   // vm_result_2 has metadata result
4159   __ get_vm_result_2(Rsuper, Robj);
4160   __ pop_ptr(Robj);
4161   __ b(resolved);
4162 
4163   __ bind(throw_exception);
4164   // Come here on failure of subtype check
4165   __ profile_typecheck_failed(R1_tmp);
4166   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4167   __ b(Interpreter::_throw_ClassCastException_entry);
4168 
4169   // Get superklass in Rsuper and subklass in Rsub
4170   __ bind(quicked);
4171   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4172 
4173   __ bind(resolved);
4174   __ load_klass(Rsub, Robj);
4175 
4176   // Generate subtype check. Blows both tmps and Rtemp.
4177   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4178   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4179 
4180   // Come here on success
4181 
4182   // Collect counts on whether this check-cast sees NULLs a lot or not.
4183   if (ProfileInterpreter) {
4184     __ b(done);
4185     __ bind(is_null);
4186     __ profile_null_seen(R1_tmp);
4187   } else {
4188     __ bind(is_null);   // same as 'done'
4189   }
4190   __ bind(done);
4191 }
4192 
4193 
4194 void TemplateTable::instanceof() {
4195   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4196   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4197 
4198   transition(atos, itos);
4199   Label done, is_null, not_subtype, quicked, resolved;
4200 
4201   const Register Robj = R0_tos;
4202   const Register Rcpool = R2_tmp;
4203   const Register Rtags = R3_tmp;
4204   const Register Rindex = R4_tmp;
4205   const Register Rsuper = R3_tmp;
4206   const Register Rsub   = R4_tmp;
4207   const Register Rsubtype_check_tmp1 = R0_tmp;
4208   const Register Rsubtype_check_tmp2 = R1_tmp;
4209 
4210   __ cbz(Robj, is_null);
4211 
4212   __ load_klass(Rsub, Robj);
4213 
4214   // Get cpool & tags index
4215   __ get_cpool_and_tags(Rcpool, Rtags);
4216   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4217 
4218   // See if bytecode has already been quicked
4219   __ add(Rtemp, Rtags, Rindex);
4220   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4221   __ cmp(Rtemp, JVM_CONSTANT_Class);
4222 
4223   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4224 
4225   __ b(quicked, eq);
4226 
4227   __ push(atos);
4228   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4229   // vm_result_2 has metadata result
4230   __ get_vm_result_2(Rsuper, Robj);
4231   __ pop_ptr(Robj);
4232   __ b(resolved);
4233 
4234   // Get superklass in Rsuper and subklass in Rsub
4235   __ bind(quicked);
4236   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4237 
4238   __ bind(resolved);
4239   __ load_klass(Rsub, Robj);
4240 
4241   // Generate subtype check. Blows both tmps and Rtemp.
4242   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4243 
4244   // Come here on success
4245   __ mov(R0_tos, 1);
4246   __ b(done);
4247 
4248   __ bind(not_subtype);
4249   // Come here on failure
4250   __ profile_typecheck_failed(R1_tmp);
4251   __ mov(R0_tos, 0);
4252 
4253   // Collect counts on whether this test sees NULLs a lot or not.
4254   if (ProfileInterpreter) {
4255     __ b(done);
4256     __ bind(is_null);
4257     __ profile_null_seen(R1_tmp);
4258   } else {
4259     __ bind(is_null);   // same as 'done'
4260   }
4261   __ bind(done);
4262 }
4263 
4264 
4265 //----------------------------------------------------------------------------------------------------
4266 // Breakpoints
4267 void TemplateTable::_breakpoint() {
4268 
4269   // Note: We get here even if we are single stepping.
4270   // jbug insists on setting breakpoints at every bytecode
4271   // even if we are in single step mode.
4272 
4273   transition(vtos, vtos);
4274 
4275   // get the unpatched byte code
4276   __ mov(R1, Rmethod);
4277   __ mov(R2, Rbcp);
4278   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4279   __ mov(Rtmp_save0, R0);
4280 
4281   // post the breakpoint event
4282   __ mov(R1, Rmethod);
4283   __ mov(R2, Rbcp);
4284   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4285 
4286   // complete the execution of original bytecode
4287   __ mov(R3_bytecode, Rtmp_save0);
4288   __ dispatch_only_normal(vtos);
4289 }
4290 
4291 
4292 //----------------------------------------------------------------------------------------------------
4293 // Exceptions
4294 
4295 void TemplateTable::athrow() {
4296   transition(atos, vtos);
4297   __ mov(Rexception_obj, R0_tos);
4298   __ null_check(Rexception_obj, Rtemp);
4299   __ b(Interpreter::throw_exception_entry());
4300 }
4301 
4302 
4303 //----------------------------------------------------------------------------------------------------
4304 // Synchronization
4305 //
4306 // Note: monitorenter & exit are symmetric routines; which is reflected
4307 //       in the assembly code structure as well
4308 //
4309 // Stack layout:
4310 //
4311 // [expressions  ] <--- Rstack_top        = expression stack top
4312 // ..
4313 // [expressions  ]
4314 // [monitor entry] <--- monitor block top = expression stack bot
4315 // ..
4316 // [monitor entry]
4317 // [frame data   ] <--- monitor block bot
4318 // ...
4319 // [saved FP     ] <--- FP
4320 
4321 
4322 void TemplateTable::monitorenter() {
4323   transition(atos, vtos);
4324 
4325   const Register Robj = R0_tos;
4326   const Register Rentry = R1_tmp;
4327 
4328   // check for NULL object
4329   __ null_check(Robj, Rtemp);
4330 
4331   __ resolve(IS_NOT_NULL, Robj);
4332 
4333   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4334   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4335   Label allocate_monitor, allocated;
4336 
4337   // initialize entry pointer
4338   __ mov(Rentry, 0);                             // points to free slot or NULL
4339 
4340   // find a free slot in the monitor block (result in Rentry)
4341   { Label loop, exit;
4342     const Register Rcur = R2_tmp;
4343     const Register Rcur_obj = Rtemp;
4344     const Register Rbottom = R3_tmp;
4345     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4346 
4347     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4348                                  // points to current entry, starting with top-most entry
4349     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4350                                  // points to word before bottom of monitor block
4351 
4352     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4353     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4354                                                  // prefetch monitor's object for the first iteration
4355     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4356 
4357     __ bind(loop);
4358     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4359     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4360 
4361     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4362     __ b(exit, eq);                              // if same object then stop searching
4363 
4364     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4365 
4366     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4367     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4368                                                  // prefetch monitor's object for the next iteration
4369     __ b(loop, ne);                              // if not at bottom then check this entry
4370     __ bind(exit);
4371   }
4372 
4373   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4374 
4375   __ bind(allocate_monitor);
4376 
4377   // allocate one if there's no free slot
4378   { Label loop;
4379     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4380 
4381     // 1. compute new pointers
4382 
4383 
4384     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4385                                                  // old monitor block top / expression stack bottom
4386 
4387     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4388     __ check_stack_top_on_expansion();
4389 
4390     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4391 
4392     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4393 
4394     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4395                                                  // set new monitor block top
4396 
4397     // 2. move expression stack contents
4398 
4399     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4400     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4401     __ b(allocated, eq);
4402 
4403     __ bind(loop);
4404     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4405                                                             // and advance to next word
4406     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4407     __ ldr(Rtemp, Address(R2, entry_size), ne);             // load expression stack word from old location
4408     __ b(loop, ne);                                         // if not at bottom then copy next word
4409   }
4410 
4411   // call run-time routine
4412 
4413   // Rentry: points to monitor entry
4414   __ bind(allocated);
4415 
4416   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4417   // The object has already been popped from the stack, so the expression stack looks correct.
4418   __ add(Rbcp, Rbcp, 1);
4419 
4420   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4421   __ lock_object(Rentry);
4422 
4423   // check to make sure this monitor doesn't cause stack overflow after locking
4424   __ save_bcp();  // in case of exception
4425   __ arm_stack_overflow_check(0, Rtemp);
4426 
4427   // The bcp has already been incremented. Just need to dispatch to next instruction.
4428   __ dispatch_next(vtos);
4429 }
4430 
4431 
4432 void TemplateTable::monitorexit() {
4433   transition(atos, vtos);
4434 
4435   const Register Robj = R0_tos;
4436   const Register Rcur = R1_tmp;
4437   const Register Rbottom = R2_tmp;
4438   const Register Rcur_obj = Rtemp;
4439 
4440   // check for NULL object
4441   __ null_check(Robj, Rtemp);
4442 
4443   __ resolve(IS_NOT_NULL, Robj);
4444 
4445   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4446   Label found, throw_exception;
4447 
4448   // find matching slot
4449   { Label loop;
4450     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4451 
4452     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4453                                  // points to current entry, starting with top-most entry
4454     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4455                                  // points to word before bottom of monitor block
4456 
4457     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4458     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4459                                                  // prefetch monitor's object for the first iteration
4460     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4461 
4462     __ bind(loop);
4463     // check if current entry is for same object
4464     __ cmp(Rcur_obj, Robj);
4465     __ b(found, eq);                             // if same object then stop searching
4466     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4467     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4468     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4469     __ b (loop, ne);                             // if not at bottom then check this entry
4470   }
4471 
4472   // error handling. Unlocking was not block-structured
4473   __ bind(throw_exception);
4474   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4475   __ should_not_reach_here();
4476 
4477   // call run-time routine
4478   // Rcur: points to monitor entry
4479   __ bind(found);
4480   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4481   __ unlock_object(Rcur);
4482   __ pop_ptr(Robj);                              // discard object
4483 }
4484 
4485 
4486 //----------------------------------------------------------------------------------------------------
4487 // Wide instructions
4488 
4489 void TemplateTable::wide() {
4490   transition(vtos, vtos);
4491   __ ldrb(R3_bytecode, at_bcp(1));
4492 
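       // Dispatch through the table of wide-variant entry points (Interpreter::_wentry_point),
       // indexed by the bytecode that follows the wide prefix.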
4493   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4494   __ ldr_literal(Rtemp, Ltable);
4495   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4496 
4497   __ nop(); // to avoid filling CPU pipeline with invalid instructions
4498   __ nop();
4499   __ bind_literal(Ltable);
4500 }
4501 
4502 
4503 //----------------------------------------------------------------------------------------------------
4504 // Multi arrays
4505 
4506 void TemplateTable::multianewarray() {
4507   transition(vtos, atos);
4508   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4509 
4510   // last dim is on top of stack; we want the address of the first one:
4511   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
4512   // the trailing wordSize is subtracted so the pointer refers to the beginning of the array.
4513   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4514   __ sub(R1, Rtemp, wordSize);
4515 
4516   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4517   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4518   // MacroAssembler::StoreStore barrier not needed here (included in the runtime exit path)
4519 }