1 /*
   2  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.inline.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/synchronizer.hpp"
  41 
  42 #define __ _masm->
  43 
  44 // Platform-dependent initialization
  45 
  46 void TemplateTable::pd_initialize() {
  47   // No aarch64 specific initialization
  48 }
  49 
  50 // Address computation: local variables
  51 
  52 static inline Address iaddress(int n) {
  53   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  54 }
  55 
  56 static inline Address laddress(int n) {
  57   return iaddress(n + 1);
  58 }
  59 
  60 static inline Address faddress(int n) {
  61   return iaddress(n);
  62 }
  63 
  64 static inline Address daddress(int n) {
  65   return laddress(n);
  66 }
  67 
  68 static inline Address aaddress(int n) {
  69   return iaddress(n);
  70 }
  71 
  72 static inline Address iaddress(Register r) {
  73   return Address(rlocals, r, Address::lsl(3));
  74 }
  75 
  76 static inline Address laddress(Register r, Register scratch,
  77                                InterpreterMacroAssembler* _masm) {
  78   __ lea(scratch, Address(rlocals, r, Address::lsl(3)));
  79   return Address(scratch, Interpreter::local_offset_in_bytes(1));
  80 }
  81 
  82 static inline Address faddress(Register r) {
  83   return iaddress(r);
  84 }
  85 
  86 static inline Address daddress(Register r, Register scratch,
  87                                InterpreterMacroAssembler* _masm) {
  88   return laddress(r, scratch, _masm);
  89 }
  90 
  91 static inline Address aaddress(Register r) {
  92   return iaddress(r);
  93 }
  94 
  95 static inline Address at_rsp() {
  96   return Address(esp, 0);
  97 }
  98 
  99 // At top of Java expression stack which may be different than esp().  It
 100 // isn't for category 1 objects.
 101 static inline Address at_tos   () {
 102   return Address(esp,  Interpreter::expr_offset_in_bytes(0));
 103 }
 104 
 105 static inline Address at_tos_p1() {
 106   return Address(esp,  Interpreter::expr_offset_in_bytes(1));
 107 }
 108 
 109 static inline Address at_tos_p2() {
 110   return Address(esp,  Interpreter::expr_offset_in_bytes(2));
 111 }
 112 
 113 static inline Address at_tos_p3() {
 114   return Address(esp,  Interpreter::expr_offset_in_bytes(3));
 115 }
 116 
 117 static inline Address at_tos_p4() {
 118   return Address(esp,  Interpreter::expr_offset_in_bytes(4));
 119 }
 120 
 121 static inline Address at_tos_p5() {
 122   return Address(esp,  Interpreter::expr_offset_in_bytes(5));
 123 }
 124 
 125 // Condition conversion
 126 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 127   switch (cc) {
 128   case TemplateTable::equal        : return Assembler::NE;
 129   case TemplateTable::not_equal    : return Assembler::EQ;
 130   case TemplateTable::less         : return Assembler::GE;
 131   case TemplateTable::less_equal   : return Assembler::GT;
 132   case TemplateTable::greater      : return Assembler::LE;
 133   case TemplateTable::greater_equal: return Assembler::LT;
 134   }
 135   ShouldNotReachHere();
 136   return Assembler::EQ;
 137 }
 138 
 139 
 140 // Miscelaneous helper routines
 141 // Store an oop (or NULL) at the Address described by obj.
 142 // If val == noreg this means store a NULL
 143 static void do_oop_store(InterpreterMacroAssembler* _masm,
 144                          Address obj,
 145                          Register val,
 146                          BarrierSet::Name barrier,
 147                          bool precise) {
 148   assert(val == noreg || val == r0, "parameter is just for looks");
 149   switch (barrier) {
 150 #if INCLUDE_ALL_GCS
 151     case BarrierSet::G1SATBCTLogging:
 152       {
 153         // flatten object address if needed
 154         if (obj.index() == noreg && obj.offset() == 0) {
 155           if (obj.base() != r3) {
 156             __ mov(r3, obj.base());
 157           }
 158         } else {
 159           __ lea(r3, obj);
 160         }
 161         __ g1_write_barrier_pre(r3 /* obj */,
 162                                 r1 /* pre_val */,
 163                                 rthread /* thread */,
 164                                 r10  /* tmp */,
 165                                 val != noreg /* tosca_live */,
 166                                 false /* expand_call */);
 167         if (val == noreg) {
 168           __ store_heap_oop_null(Address(r3, 0));
 169         } else {
 170           // G1 barrier needs uncompressed oop for region cross check.
 171           Register new_val = val;
 172           if (UseCompressedOops) {
 173             new_val = rscratch1;
 174             __ mov(new_val, val);
 175           }
 176           __ store_heap_oop(Address(r3, 0), val);
 177           __ g1_write_barrier_post(r3 /* store_adr */,
 178                                    new_val /* new_val */,
 179                                    rthread /* thread */,
 180                                    r10 /* tmp */,
 181                                    r1 /* tmp2 */);
 182         }
 183 
 184       }
 185       break;
 186 #endif // INCLUDE_ALL_GCS
 187     case BarrierSet::CardTableForRS:
 188     case BarrierSet::CardTableExtension:
 189       {
 190         if (val == noreg) {
 191           __ store_heap_oop_null(obj);
 192         } else {
 193           __ store_heap_oop(obj, val);
 194           // flatten object address if needed
 195           if (!precise || (obj.index() == noreg && obj.offset() == 0)) {
 196             __ store_check(obj.base());
 197           } else {
 198             __ lea(r3, obj);
 199             __ store_check(r3);
 200           }
 201         }
 202       }
 203       break;
 204     case BarrierSet::ModRef:
 205       if (val == noreg) {
 206         __ store_heap_oop_null(obj);
 207       } else {
 208         __ store_heap_oop(obj, val);
 209       }
 210       break;
 211     default      :
 212       ShouldNotReachHere();
 213 
 214   }
 215 }
 216 
 217 Address TemplateTable::at_bcp(int offset) {
 218   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 219   return Address(rbcp, offset);
 220 }
 221 
 222 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 223                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 224                                    int byte_no)
 225 {
 226   if (!RewriteBytecodes)  return;
 227   Label L_patch_done;
 228 
 229   switch (bc) {
 230   case Bytecodes::_fast_aputfield:
 231   case Bytecodes::_fast_bputfield:
 232   case Bytecodes::_fast_cputfield:
 233   case Bytecodes::_fast_dputfield:
 234   case Bytecodes::_fast_fputfield:
 235   case Bytecodes::_fast_iputfield:
 236   case Bytecodes::_fast_lputfield:
 237   case Bytecodes::_fast_sputfield:
 238     {
 239       // We skip bytecode quickening for putfield instructions when
 240       // the put_code written to the constant pool cache is zero.
 241       // This is required so that every execution of this instruction
 242       // calls out to InterpreterRuntime::resolve_get_put to do
 243       // additional, required work.
 244       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 245       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 246       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 247       __ movw(bc_reg, bc);
 248       __ cmpw(temp_reg, (unsigned) 0);
 249       __ br(Assembler::EQ, L_patch_done);  // don't patch
 250     }
 251     break;
 252   default:
 253     assert(byte_no == -1, "sanity");
 254     // the pair bytecodes have already done the load.
 255     if (load_bc_into_bc_reg) {
 256       __ movw(bc_reg, bc);
 257     }
 258   }
 259 
 260   if (JvmtiExport::can_post_breakpoint()) {
 261     Label L_fast_patch;
 262     // if a breakpoint is present we can't rewrite the stream directly
 263     __ load_unsigned_byte(temp_reg, at_bcp(0));
 264     __ cmpw(temp_reg, Bytecodes::_breakpoint);
 265     __ br(Assembler::NE, L_fast_patch);
 266     // Let breakpoint table handling rewrite to quicker bytecode
 267     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
 268     __ b(L_patch_done);
 269     __ bind(L_fast_patch);
 270   }
 271 
 272 #ifdef ASSERT
 273   Label L_okay;
 274   __ load_unsigned_byte(temp_reg, at_bcp(0));
 275   __ cmpw(temp_reg, (int) Bytecodes::java_code(bc));
 276   __ br(Assembler::EQ, L_okay);
 277   __ cmpw(temp_reg, bc_reg);
 278   __ br(Assembler::EQ, L_okay);
 279   __ stop("patching the wrong bytecode");
 280   __ bind(L_okay);
 281 #endif
 282 
 283   // patch bytecode
 284   __ strb(bc_reg, at_bcp(0));
 285   __ bind(L_patch_done);
 286 }
 287 
 288 
 289 // Individual instructions
 290 
 291 void TemplateTable::nop() {
 292   transition(vtos, vtos);
 293   // nothing to do
 294 }
 295 
 296 void TemplateTable::shouldnotreachhere() {
 297   transition(vtos, vtos);
 298   __ stop("shouldnotreachhere bytecode");
 299 }
 300 
 301 void TemplateTable::aconst_null()
 302 {
 303   transition(vtos, atos);
 304   __ mov(r0, 0);
 305 }
 306 
 307 void TemplateTable::iconst(int value)
 308 {
 309   transition(vtos, itos);
 310   __ mov(r0, value);
 311 }
 312 
 313 void TemplateTable::lconst(int value)
 314 {
 315   __ mov(r0, value);
 316 }
 317 
 318 void TemplateTable::fconst(int value)
 319 {
 320   transition(vtos, ftos);
 321   switch (value) {
 322   case 0:
 323     __ fmovs(v0, zr);
 324     break;
 325   case 1:
 326     __ fmovs(v0, 1.0);
 327     break;
 328   case 2:
 329     __ fmovs(v0, 2.0);
 330     break;
 331   default:
 332     ShouldNotReachHere();
 333     break;
 334   }
 335 }
 336 
 337 void TemplateTable::dconst(int value)
 338 {
 339   transition(vtos, dtos);
 340   switch (value) {
 341   case 0:
 342     __ fmovd(v0, zr);
 343     break;
 344   case 1:
 345     __ fmovd(v0, 1.0);
 346     break;
 347   case 2:
 348     __ fmovd(v0, 2.0);
 349     break;
 350   default:
 351     ShouldNotReachHere();
 352     break;
 353   }
 354 }
 355 
 356 void TemplateTable::bipush()
 357 {
 358   transition(vtos, itos);
 359   __ load_signed_byte32(r0, at_bcp(1));
 360 }
 361 
 362 void TemplateTable::sipush()
 363 {
 364   transition(vtos, itos);
 365   __ load_unsigned_short(r0, at_bcp(1));
 366   __ revw(r0, r0);
 367   __ asrw(r0, r0, 16);
 368 }
 369 
 370 void TemplateTable::ldc(bool wide)
 371 {
 372   transition(vtos, vtos);
 373   Label call_ldc, notFloat, notClass, Done;
 374 
 375   if (wide) {
 376     __ get_unsigned_2_byte_index_at_bcp(r1, 1);
 377   } else {
 378     __ load_unsigned_byte(r1, at_bcp(1));
 379   }
 380   __ get_cpool_and_tags(r2, r0);
 381 
 382   const int base_offset = ConstantPool::header_size() * wordSize;
 383   const int tags_offset = Array<u1>::base_offset_in_bytes();
 384 
 385   // get type
 386   __ add(r3, r1, tags_offset);
 387   __ lea(r3, Address(r0, r3));
 388   __ ldarb(r3, r3);
 389 
 390   // unresolved class - get the resolved class
 391   __ cmp(r3, JVM_CONSTANT_UnresolvedClass);
 392   __ br(Assembler::EQ, call_ldc);
 393 
 394   // unresolved class in error state - call into runtime to throw the error
 395   // from the first resolution attempt
 396   __ cmp(r3, JVM_CONSTANT_UnresolvedClassInError);
 397   __ br(Assembler::EQ, call_ldc);
 398 
 399   // resolved class - need to call vm to get java mirror of the class
 400   __ cmp(r3, JVM_CONSTANT_Class);
 401   __ br(Assembler::NE, notClass);
 402 
 403   __ bind(call_ldc);
 404   __ mov(c_rarg1, wide);
 405   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
 406   __ push_ptr(r0);
 407   __ verify_oop(r0);
 408   __ b(Done);
 409 
 410   __ bind(notClass);
 411   __ cmp(r3, JVM_CONSTANT_Float);
 412   __ br(Assembler::NE, notFloat);
 413   // ftos
 414   __ adds(r1, r2, r1, Assembler::LSL, 3);
 415   __ ldrs(v0, Address(r1, base_offset));
 416   __ push_f();
 417   __ b(Done);
 418 
 419   __ bind(notFloat);
 420 #ifdef ASSERT
 421   {
 422     Label L;
 423     __ cmp(r3, JVM_CONSTANT_Integer);
 424     __ br(Assembler::EQ, L);
 425     // String and Object are rewritten to fast_aldc
 426     __ stop("unexpected tag type in ldc");
 427     __ bind(L);
 428   }
 429 #endif
 430   // itos JVM_CONSTANT_Integer only
 431   __ adds(r1, r2, r1, Assembler::LSL, 3);
 432   __ ldrw(r0, Address(r1, base_offset));
 433   __ push_i(r0);
 434   __ bind(Done);
 435 }
 436 
 437 // Fast path for caching oop constants.
 438 void TemplateTable::fast_aldc(bool wide)
 439 {
 440   transition(vtos, atos);
 441 
 442   Register result = r0;
 443   Register tmp = r1;
 444   int index_size = wide ? sizeof(u2) : sizeof(u1);
 445 
 446   Label resolved;
 447 
 448   // We are resolved if the resolved reference cache entry contains a
 449   // non-null object (String, MethodType, etc.)
 450   assert_different_registers(result, tmp);
 451   __ get_cache_index_at_bcp(tmp, 1, index_size);
 452   __ load_resolved_reference_at_index(result, tmp);
 453   __ cbnz(result, resolved);
 454 
 455   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 456 
 457   // first time invocation - must resolve first
 458   __ mov(tmp, (int)bytecode());
 459   __ call_VM(result, entry, tmp);
 460 
 461   __ bind(resolved);
 462 
 463   if (VerifyOops) {
 464     __ verify_oop(result);
 465   }
 466 }
 467 
 468 void TemplateTable::ldc2_w()
 469 {
 470   transition(vtos, vtos);
 471   Label Long, Done;
 472   __ get_unsigned_2_byte_index_at_bcp(r0, 1);
 473 
 474   __ get_cpool_and_tags(r1, r2);
 475   const int base_offset = ConstantPool::header_size() * wordSize;
 476   const int tags_offset = Array<u1>::base_offset_in_bytes();
 477 
 478   // get type
 479   __ lea(r2, Address(r2, r0, Address::lsl(0)));
 480   __ load_unsigned_byte(r2, Address(r2, tags_offset));
 481   __ cmpw(r2, (int)JVM_CONSTANT_Double);
 482   __ br(Assembler::NE, Long);
 483   // dtos
 484   __ lea (r2, Address(r1, r0, Address::lsl(3)));
 485   __ ldrd(v0, Address(r2, base_offset));
 486   __ push_d();
 487   __ b(Done);
 488 
 489   __ bind(Long);
 490   // ltos
 491   __ lea(r0, Address(r1, r0, Address::lsl(3)));
 492   __ ldr(r0, Address(r0, base_offset));
 493   __ push_l();
 494 
 495   __ bind(Done);
 496 }
 497 
 498 void TemplateTable::locals_index(Register reg, int offset)
 499 {
 500   __ ldrb(reg, at_bcp(offset));
 501   __ neg(reg, reg);
 502 }
 503 
 504 void TemplateTable::iload() {
 505   iload_internal();
 506 }
 507 
 508 void TemplateTable::nofast_iload() {
 509   iload_internal(may_not_rewrite);
 510 }
 511 
 512 void TemplateTable::iload_internal(RewriteControl rc) {
 513   transition(vtos, itos);
 514   if (RewriteFrequentPairs && rc == may_rewrite) {
 515     Label rewrite, done;
 516     Register bc = r4;
 517 
 518     // get next bytecode
 519     __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 520 
 521     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 522     // last two iloads in a pair.  Comparing against fast_iload means that
 523     // the next bytecode is neither an iload or a caload, and therefore
 524     // an iload pair.
 525     __ cmpw(r1, Bytecodes::_iload);
 526     __ br(Assembler::EQ, done);
 527 
 528     // if _fast_iload rewrite to _fast_iload2
 529     __ cmpw(r1, Bytecodes::_fast_iload);
 530     __ movw(bc, Bytecodes::_fast_iload2);
 531     __ br(Assembler::EQ, rewrite);
 532 
 533     // if _caload rewrite to _fast_icaload
 534     __ cmpw(r1, Bytecodes::_caload);
 535     __ movw(bc, Bytecodes::_fast_icaload);
 536     __ br(Assembler::EQ, rewrite);
 537 
 538     // else rewrite to _fast_iload
 539     __ movw(bc, Bytecodes::_fast_iload);
 540 
 541     // rewrite
 542     // bc: new bytecode
 543     __ bind(rewrite);
 544     patch_bytecode(Bytecodes::_iload, bc, r1, false);
 545     __ bind(done);
 546 
 547   }
 548 
 549   // do iload, get the local value into tos
 550   locals_index(r1);
 551   __ ldr(r0, iaddress(r1));
 552 
 553 }
 554 
 555 void TemplateTable::fast_iload2()
 556 {
 557   transition(vtos, itos);
 558   locals_index(r1);
 559   __ ldr(r0, iaddress(r1));
 560   __ push(itos);
 561   locals_index(r1, 3);
 562   __ ldr(r0, iaddress(r1));
 563 }
 564 
 565 void TemplateTable::fast_iload()
 566 {
 567   transition(vtos, itos);
 568   locals_index(r1);
 569   __ ldr(r0, iaddress(r1));
 570 }
 571 
 572 void TemplateTable::lload()
 573 {
 574   transition(vtos, ltos);
 575   __ ldrb(r1, at_bcp(1));
 576   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 577   __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 578 }
 579 
 580 void TemplateTable::fload()
 581 {
 582   transition(vtos, ftos);
 583   locals_index(r1);
 584   // n.b. we use ldrd here because this is a 64 bit slot
 585   // this is comparable to the iload case
 586   __ ldrd(v0, faddress(r1));
 587 }
 588 
 589 void TemplateTable::dload()
 590 {
 591   transition(vtos, dtos);
 592   __ ldrb(r1, at_bcp(1));
 593   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 594   __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 595 }
 596 
 597 void TemplateTable::aload()
 598 {
 599   transition(vtos, atos);
 600   locals_index(r1);
 601   __ ldr(r0, iaddress(r1));
 602 }
 603 
 604 void TemplateTable::locals_index_wide(Register reg) {
 605   __ ldrh(reg, at_bcp(2));
 606   __ rev16w(reg, reg);
 607   __ neg(reg, reg);
 608 }
 609 
 610 void TemplateTable::wide_iload() {
 611   transition(vtos, itos);
 612   locals_index_wide(r1);
 613   __ ldr(r0, iaddress(r1));
 614 }
 615 
 616 void TemplateTable::wide_lload()
 617 {
 618   transition(vtos, ltos);
 619   __ ldrh(r1, at_bcp(2));
 620   __ rev16w(r1, r1);
 621   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 622   __ ldr(r0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 623 }
 624 
 625 void TemplateTable::wide_fload()
 626 {
 627   transition(vtos, ftos);
 628   locals_index_wide(r1);
 629   // n.b. we use ldrd here because this is a 64 bit slot
 630   // this is comparable to the iload case
 631   __ ldrd(v0, faddress(r1));
 632 }
 633 
 634 void TemplateTable::wide_dload()
 635 {
 636   transition(vtos, dtos);
 637   __ ldrh(r1, at_bcp(2));
 638   __ rev16w(r1, r1);
 639   __ sub(r1, rlocals, r1, ext::uxtw, LogBytesPerWord);
 640   __ ldrd(v0, Address(r1, Interpreter::local_offset_in_bytes(1)));
 641 }
 642 
 643 void TemplateTable::wide_aload()
 644 {
 645   transition(vtos, atos);
 646   locals_index_wide(r1);
 647   __ ldr(r0, aaddress(r1));
 648 }
 649 
 650 void TemplateTable::index_check(Register array, Register index)
 651 {
 652   // destroys r1, rscratch1
 653   // check array
 654   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 655   // sign extend index for use by indexed load
 656   // __ movl2ptr(index, index);
 657   // check index
 658   Register length = rscratch1;
 659   __ ldrw(length, Address(array, arrayOopDesc::length_offset_in_bytes()));
 660   __ cmpw(index, length);
 661   if (index != r1) {
 662     // ??? convention: move aberrant index into r1 for exception message
 663     assert(r1 != array, "different registers");
 664     __ mov(r1, index);
 665   }
 666   Label ok;
 667   __ br(Assembler::LO, ok);
 668   __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
 669   __ br(rscratch1);
 670   __ bind(ok);
 671 }
 672 
 673 void TemplateTable::iaload()
 674 {
 675   transition(itos, itos);
 676   __ mov(r1, r0);
 677   __ pop_ptr(r0);
 678   // r0: array
 679   // r1: index
 680   index_check(r0, r1); // leaves index in r1, kills rscratch1
 681   __ lea(r1, Address(r0, r1, Address::uxtw(2)));
 682   __ ldrw(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_INT)));
 683 }
 684 
 685 void TemplateTable::laload()
 686 {
 687   transition(itos, ltos);
 688   __ mov(r1, r0);
 689   __ pop_ptr(r0);
 690   // r0: array
 691   // r1: index
 692   index_check(r0, r1); // leaves index in r1, kills rscratch1
 693   __ lea(r1, Address(r0, r1, Address::uxtw(3)));
 694   __ ldr(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_LONG)));
 695 }
 696 
 697 void TemplateTable::faload()
 698 {
 699   transition(itos, ftos);
 700   __ mov(r1, r0);
 701   __ pop_ptr(r0);
 702   // r0: array
 703   // r1: index
 704   index_check(r0, r1); // leaves index in r1, kills rscratch1
 705   __ lea(r1,  Address(r0, r1, Address::uxtw(2)));
 706   __ ldrs(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
 707 }
 708 
 709 void TemplateTable::daload()
 710 {
 711   transition(itos, dtos);
 712   __ mov(r1, r0);
 713   __ pop_ptr(r0);
 714   // r0: array
 715   // r1: index
 716   index_check(r0, r1); // leaves index in r1, kills rscratch1
 717   __ lea(r1,  Address(r0, r1, Address::uxtw(3)));
 718   __ ldrd(v0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 719 }
 720 
 721 void TemplateTable::aaload()
 722 {
 723   transition(itos, atos);
 724   __ mov(r1, r0);
 725   __ pop_ptr(r0);
 726   // r0: array
 727   // r1: index
 728   index_check(r0, r1); // leaves index in r1, kills rscratch1
 729   int s = (UseCompressedOops ? 2 : 3);
 730   __ lea(r1, Address(r0, r1, Address::uxtw(s)));
 731   __ load_heap_oop(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 732 }
 733 
 734 void TemplateTable::baload()
 735 {
 736   transition(itos, itos);
 737   __ mov(r1, r0);
 738   __ pop_ptr(r0);
 739   // r0: array
 740   // r1: index
 741   index_check(r0, r1); // leaves index in r1, kills rscratch1
 742   __ lea(r1,  Address(r0, r1, Address::uxtw(0)));
 743   __ load_signed_byte(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_BYTE)));
 744 }
 745 
 746 void TemplateTable::caload()
 747 {
 748   transition(itos, itos);
 749   __ mov(r1, r0);
 750   __ pop_ptr(r0);
 751   // r0: array
 752   // r1: index
 753   index_check(r0, r1); // leaves index in r1, kills rscratch1
 754   __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
 755   __ load_unsigned_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 756 }
 757 
 758 // iload followed by caload frequent pair
 759 void TemplateTable::fast_icaload()
 760 {
 761   transition(vtos, itos);
 762   // load index out of locals
 763   locals_index(r2);
 764   __ ldr(r1, iaddress(r2));
 765 
 766   __ pop_ptr(r0);
 767 
 768   // r0: array
 769   // r1: index
 770   index_check(r0, r1); // leaves index in r1, kills rscratch1
 771   __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
 772   __ load_unsigned_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 773 }
 774 
 775 void TemplateTable::saload()
 776 {
 777   transition(itos, itos);
 778   __ mov(r1, r0);
 779   __ pop_ptr(r0);
 780   // r0: array
 781   // r1: index
 782   index_check(r0, r1); // leaves index in r1, kills rscratch1
 783   __ lea(r1,  Address(r0, r1, Address::uxtw(1)));
 784   __ load_signed_short(r0, Address(r1,  arrayOopDesc::base_offset_in_bytes(T_SHORT)));
 785 }
 786 
 787 void TemplateTable::iload(int n)
 788 {
 789   transition(vtos, itos);
 790   __ ldr(r0, iaddress(n));
 791 }
 792 
 793 void TemplateTable::lload(int n)
 794 {
 795   transition(vtos, ltos);
 796   __ ldr(r0, laddress(n));
 797 }
 798 
 799 void TemplateTable::fload(int n)
 800 {
 801   transition(vtos, ftos);
 802   __ ldrs(v0, faddress(n));
 803 }
 804 
 805 void TemplateTable::dload(int n)
 806 {
 807   transition(vtos, dtos);
 808   __ ldrd(v0, daddress(n));
 809 }
 810 
 811 void TemplateTable::aload(int n)
 812 {
 813   transition(vtos, atos);
 814   __ ldr(r0, iaddress(n));
 815 }
 816 
 817 void TemplateTable::aload_0() {
 818   aload_0_internal();
 819 }
 820 
 821 void TemplateTable::nofast_aload_0() {
 822   aload_0_internal(may_not_rewrite);
 823 }
 824 
 825 void TemplateTable::aload_0_internal(RewriteControl rc) {
 826   // According to bytecode histograms, the pairs:
 827   //
 828   // _aload_0, _fast_igetfield
 829   // _aload_0, _fast_agetfield
 830   // _aload_0, _fast_fgetfield
 831   //
 832   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 833   // _aload_0 bytecode checks if the next bytecode is either
 834   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 835   // rewrites the current bytecode into a pair bytecode; otherwise it
 836   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 837   // the pair check anymore.
 838   //
 839   // Note: If the next bytecode is _getfield, the rewrite must be
 840   //       delayed, otherwise we may miss an opportunity for a pair.
 841   //
 842   // Also rewrite frequent pairs
 843   //   aload_0, aload_1
 844   //   aload_0, iload_1
 845   // These bytecodes with a small amount of code are most profitable
 846   // to rewrite
 847   if (RewriteFrequentPairs && rc == may_rewrite) {
 848     Label rewrite, done;
 849     const Register bc = r4;
 850 
 851     // get next bytecode
 852     __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 853 
 854     // do actual aload_0
 855     aload(0);
 856 
 857     // if _getfield then wait with rewrite
 858     __ cmpw(r1, Bytecodes::Bytecodes::_getfield);
 859     __ br(Assembler::EQ, done);
 860 
 861     // if _igetfield then reqrite to _fast_iaccess_0
 862     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 863     __ cmpw(r1, Bytecodes::_fast_igetfield);
 864     __ movw(bc, Bytecodes::_fast_iaccess_0);
 865     __ br(Assembler::EQ, rewrite);
 866 
 867     // if _agetfield then reqrite to _fast_aaccess_0
 868     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 869     __ cmpw(r1, Bytecodes::_fast_agetfield);
 870     __ movw(bc, Bytecodes::_fast_aaccess_0);
 871     __ br(Assembler::EQ, rewrite);
 872 
 873     // if _fgetfield then reqrite to _fast_faccess_0
 874     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 875     __ cmpw(r1, Bytecodes::_fast_fgetfield);
 876     __ movw(bc, Bytecodes::_fast_faccess_0);
 877     __ br(Assembler::EQ, rewrite);
 878 
 879     // else rewrite to _fast_aload0
 880     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 881     __ movw(bc, Bytecodes::Bytecodes::_fast_aload_0);
 882 
 883     // rewrite
 884     // bc: new bytecode
 885     __ bind(rewrite);
 886     patch_bytecode(Bytecodes::_aload_0, bc, r1, false);
 887 
 888     __ bind(done);
 889   } else {
 890     aload(0);
 891   }
 892 }
 893 
 894 void TemplateTable::istore()
 895 {
 896   transition(itos, vtos);
 897   locals_index(r1);
 898   // FIXME: We're being very pernickerty here storing a jint in a
 899   // local with strw, which costs an extra instruction over what we'd
 900   // be able to do with a simple str.  We should just store the whole
 901   // word.
 902   __ lea(rscratch1, iaddress(r1));
 903   __ strw(r0, Address(rscratch1));
 904 }
 905 
 906 void TemplateTable::lstore()
 907 {
 908   transition(ltos, vtos);
 909   locals_index(r1);
 910   __ str(r0, laddress(r1, rscratch1, _masm));
 911 }
 912 
 913 void TemplateTable::fstore() {
 914   transition(ftos, vtos);
 915   locals_index(r1);
 916   __ lea(rscratch1, iaddress(r1));
 917   __ strs(v0, Address(rscratch1));
 918 }
 919 
 920 void TemplateTable::dstore() {
 921   transition(dtos, vtos);
 922   locals_index(r1);
 923   __ strd(v0, daddress(r1, rscratch1, _masm));
 924 }
 925 
 926 void TemplateTable::astore()
 927 {
 928   transition(vtos, vtos);
 929   __ pop_ptr(r0);
 930   locals_index(r1);
 931   __ str(r0, aaddress(r1));
 932 }
 933 
 934 void TemplateTable::wide_istore() {
 935   transition(vtos, vtos);
 936   __ pop_i();
 937   locals_index_wide(r1);
 938   __ lea(rscratch1, iaddress(r1));
 939   __ strw(r0, Address(rscratch1));
 940 }
 941 
 942 void TemplateTable::wide_lstore() {
 943   transition(vtos, vtos);
 944   __ pop_l();
 945   locals_index_wide(r1);
 946   __ str(r0, laddress(r1, rscratch1, _masm));
 947 }
 948 
 949 void TemplateTable::wide_fstore() {
 950   transition(vtos, vtos);
 951   __ pop_f();
 952   locals_index_wide(r1);
 953   __ lea(rscratch1, faddress(r1));
 954   __ strs(v0, rscratch1);
 955 }
 956 
 957 void TemplateTable::wide_dstore() {
 958   transition(vtos, vtos);
 959   __ pop_d();
 960   locals_index_wide(r1);
 961   __ strd(v0, daddress(r1, rscratch1, _masm));
 962 }
 963 
 964 void TemplateTable::wide_astore() {
 965   transition(vtos, vtos);
 966   __ pop_ptr(r0);
 967   locals_index_wide(r1);
 968   __ str(r0, aaddress(r1));
 969 }
 970 
 971 void TemplateTable::iastore() {
 972   transition(itos, vtos);
 973   __ pop_i(r1);
 974   __ pop_ptr(r3);
 975   // r0: value
 976   // r1: index
 977   // r3: array
 978   index_check(r3, r1); // prefer index in r1
 979   __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
 980   __ strw(r0, Address(rscratch1,
 981                       arrayOopDesc::base_offset_in_bytes(T_INT)));
 982 }
 983 
 984 void TemplateTable::lastore() {
 985   transition(ltos, vtos);
 986   __ pop_i(r1);
 987   __ pop_ptr(r3);
 988   // r0: value
 989   // r1: index
 990   // r3: array
 991   index_check(r3, r1); // prefer index in r1
 992   __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
 993   __ str(r0, Address(rscratch1,
 994                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
 995 }
 996 
 997 void TemplateTable::fastore() {
 998   transition(ftos, vtos);
 999   __ pop_i(r1);
1000   __ pop_ptr(r3);
1001   // v0: value
1002   // r1:  index
1003   // r3:  array
1004   index_check(r3, r1); // prefer index in r1
1005   __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
1006   __ strs(v0, Address(rscratch1,
1007                       arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
1008 }
1009 
1010 void TemplateTable::dastore() {
1011   transition(dtos, vtos);
1012   __ pop_i(r1);
1013   __ pop_ptr(r3);
1014   // v0: value
1015   // r1:  index
1016   // r3:  array
1017   index_check(r3, r1); // prefer index in r1
1018   __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
1019   __ strd(v0, Address(rscratch1,
1020                       arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
1021 }
1022 
1023 void TemplateTable::aastore() {
1024   Label is_null, ok_is_subtype, done;
1025   transition(vtos, vtos);
1026   // stack: ..., array, index, value
1027   __ ldr(r0, at_tos());    // value
1028   __ ldr(r2, at_tos_p1()); // index
1029   __ ldr(r3, at_tos_p2()); // array
1030 
1031   Address element_address(r4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1032 
1033   index_check(r3, r2);     // kills r1
1034   __ lea(r4, Address(r3, r2, Address::uxtw(UseCompressedOops? 2 : 3)));
1035 
1036   // do array store check - check for NULL value first
1037   __ cbz(r0, is_null);
1038 
1039   // Move subklass into r1
1040   __ load_klass(r1, r0);
1041   // Move superklass into r0
1042   __ load_klass(r0, r3);
1043   __ ldr(r0, Address(r0,
1044                      ObjArrayKlass::element_klass_offset()));
1045   // Compress array + index*oopSize + 12 into a single register.  Frees r2.
1046 
1047   // Generate subtype check.  Blows r2, r5
1048   // Superklass in r0.  Subklass in r1.
1049   __ gen_subtype_check(r1, ok_is_subtype);
1050 
1051   // Come here on failure
1052   // object is at TOS
1053   __ b(Interpreter::_throw_ArrayStoreException_entry);
1054 
1055   // Come here on success
1056   __ bind(ok_is_subtype);
1057 
1058   // Get the value we will store
1059   __ ldr(r0, at_tos());
1060   // Now store using the appropriate barrier
1061   do_oop_store(_masm, element_address, r0, _bs->kind(), true);
1062   __ b(done);
1063 
1064   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
1065   __ bind(is_null);
1066   __ profile_null_seen(r2);
1067 
1068   // Store a NULL
1069   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1070 
1071   // Pop stack arguments
1072   __ bind(done);
1073   __ add(esp, esp, 3 * Interpreter::stackElementSize);
1074 }
1075 
1076 void TemplateTable::bastore()
1077 {
1078   transition(itos, vtos);
1079   __ pop_i(r1);
1080   __ pop_ptr(r3);
1081   // r0: value
1082   // r1: index
1083   // r3: array
1084   index_check(r3, r1); // prefer index in r1
1085   __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
1086   __ strb(r0, Address(rscratch1,
1087                       arrayOopDesc::base_offset_in_bytes(T_BYTE)));
1088 }
1089 
1090 void TemplateTable::castore()
1091 {
1092   transition(itos, vtos);
1093   __ pop_i(r1);
1094   __ pop_ptr(r3);
1095   // r0: value
1096   // r1: index
1097   // r3: array
1098   index_check(r3, r1); // prefer index in r1
1099   __ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
1100   __ strh(r0, Address(rscratch1,
1101                       arrayOopDesc::base_offset_in_bytes(T_CHAR)));
1102 }
1103 
1104 void TemplateTable::sastore()
1105 {
1106   castore();
1107 }
1108 
1109 void TemplateTable::istore(int n)
1110 {
1111   transition(itos, vtos);
1112   __ str(r0, iaddress(n));
1113 }
1114 
1115 void TemplateTable::lstore(int n)
1116 {
1117   transition(ltos, vtos);
1118   __ str(r0, laddress(n));
1119 }
1120 
1121 void TemplateTable::fstore(int n)
1122 {
1123   transition(ftos, vtos);
1124   __ strs(v0, faddress(n));
1125 }
1126 
1127 void TemplateTable::dstore(int n)
1128 {
1129   transition(dtos, vtos);
1130   __ strd(v0, daddress(n));
1131 }
1132 
1133 void TemplateTable::astore(int n)
1134 {
1135   transition(vtos, vtos);
1136   __ pop_ptr(r0);
1137   __ str(r0, iaddress(n));
1138 }
1139 
1140 void TemplateTable::pop()
1141 {
1142   transition(vtos, vtos);
1143   __ add(esp, esp, Interpreter::stackElementSize);
1144 }
1145 
1146 void TemplateTable::pop2()
1147 {
1148   transition(vtos, vtos);
1149   __ add(esp, esp, 2 * Interpreter::stackElementSize);
1150 }
1151 
1152 void TemplateTable::dup()
1153 {
1154   transition(vtos, vtos);
1155   __ ldr(r0, Address(esp, 0));
1156   __ push(r0);
1157   // stack: ..., a, a
1158 }
1159 
1160 void TemplateTable::dup_x1()
1161 {
1162   transition(vtos, vtos);
1163   // stack: ..., a, b
1164   __ ldr(r0, at_tos());  // load b
1165   __ ldr(r2, at_tos_p1());  // load a
1166   __ str(r0, at_tos_p1());  // store b
1167   __ str(r2, at_tos());  // store a
1168   __ push(r0);                  // push b
1169   // stack: ..., b, a, b
1170 }
1171 
1172 void TemplateTable::dup_x2()
1173 {
1174   transition(vtos, vtos);
1175   // stack: ..., a, b, c
1176   __ ldr(r0, at_tos());  // load c
1177   __ ldr(r2, at_tos_p2());  // load a
1178   __ str(r0, at_tos_p2());  // store c in a
1179   __ push(r0);      // push c
1180   // stack: ..., c, b, c, c
1181   __ ldr(r0, at_tos_p2());  // load b
1182   __ str(r2, at_tos_p2());  // store a in b
1183   // stack: ..., c, a, c, c
1184   __ str(r0, at_tos_p1());  // store b in c
1185   // stack: ..., c, a, b, c
1186 }
1187 
1188 void TemplateTable::dup2()
1189 {
1190   transition(vtos, vtos);
1191   // stack: ..., a, b
1192   __ ldr(r0, at_tos_p1());  // load a
1193   __ push(r0);                  // push a
1194   __ ldr(r0, at_tos_p1());  // load b
1195   __ push(r0);                  // push b
1196   // stack: ..., a, b, a, b
1197 }
1198 
1199 void TemplateTable::dup2_x1()
1200 {
1201   transition(vtos, vtos);
1202   // stack: ..., a, b, c
1203   __ ldr(r2, at_tos());  // load c
1204   __ ldr(r0, at_tos_p1());  // load b
1205   __ push(r0);                  // push b
1206   __ push(r2);                  // push c
1207   // stack: ..., a, b, c, b, c
1208   __ str(r2, at_tos_p3());  // store c in b
1209   // stack: ..., a, c, c, b, c
1210   __ ldr(r2, at_tos_p4());  // load a
1211   __ str(r2, at_tos_p2());  // store a in 2nd c
1212   // stack: ..., a, c, a, b, c
1213   __ str(r0, at_tos_p4());  // store b in a
1214   // stack: ..., b, c, a, b, c
1215 }
1216 
1217 void TemplateTable::dup2_x2()
1218 {
1219   transition(vtos, vtos);
1220   // stack: ..., a, b, c, d
1221   __ ldr(r2, at_tos());  // load d
1222   __ ldr(r0, at_tos_p1());  // load c
1223   __ push(r0)            ;      // push c
1224   __ push(r2);                  // push d
1225   // stack: ..., a, b, c, d, c, d
1226   __ ldr(r0, at_tos_p4());  // load b
1227   __ str(r0, at_tos_p2());  // store b in d
1228   __ str(r2, at_tos_p4());  // store d in b
1229   // stack: ..., a, d, c, b, c, d
1230   __ ldr(r2, at_tos_p5());  // load a
1231   __ ldr(r0, at_tos_p3());  // load c
1232   __ str(r2, at_tos_p3());  // store a in c
1233   __ str(r0, at_tos_p5());  // store c in a
1234   // stack: ..., c, d, a, b, c, d
1235 }
1236 
1237 void TemplateTable::swap()
1238 {
1239   transition(vtos, vtos);
1240   // stack: ..., a, b
1241   __ ldr(r2, at_tos_p1());  // load a
1242   __ ldr(r0, at_tos());  // load b
1243   __ str(r2, at_tos());  // store a in b
1244   __ str(r0, at_tos_p1());  // store b in a
1245   // stack: ..., b, a
1246 }
1247 
1248 void TemplateTable::iop2(Operation op)
1249 {
1250   transition(itos, itos);
1251   // r0 <== r1 op r0
1252   __ pop_i(r1);
1253   switch (op) {
1254   case add  : __ addw(r0, r1, r0); break;
1255   case sub  : __ subw(r0, r1, r0); break;
1256   case mul  : __ mulw(r0, r1, r0); break;
1257   case _and : __ andw(r0, r1, r0); break;
1258   case _or  : __ orrw(r0, r1, r0); break;
1259   case _xor : __ eorw(r0, r1, r0); break;
1260   case shl  : __ lslvw(r0, r1, r0); break;
1261   case shr  : __ asrvw(r0, r1, r0); break;
1262   case ushr : __ lsrvw(r0, r1, r0);break;
1263   default   : ShouldNotReachHere();
1264   }
1265 }
1266 
1267 void TemplateTable::lop2(Operation op)
1268 {
1269   transition(ltos, ltos);
1270   // r0 <== r1 op r0
1271   __ pop_l(r1);
1272   switch (op) {
1273   case add  : __ add(r0, r1, r0); break;
1274   case sub  : __ sub(r0, r1, r0); break;
1275   case mul  : __ mul(r0, r1, r0); break;
1276   case _and : __ andr(r0, r1, r0); break;
1277   case _or  : __ orr(r0, r1, r0); break;
1278   case _xor : __ eor(r0, r1, r0); break;
1279   default   : ShouldNotReachHere();
1280   }
1281 }
1282 
1283 void TemplateTable::idiv()
1284 {
1285   transition(itos, itos);
1286   // explicitly check for div0
1287   Label no_div0;
1288   __ cbnzw(r0, no_div0);
1289   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1290   __ br(rscratch1);
1291   __ bind(no_div0);
1292   __ pop_i(r1);
1293   // r0 <== r1 idiv r0
1294   __ corrected_idivl(r0, r1, r0, /* want_remainder */ false);
1295 }
1296 
1297 void TemplateTable::irem()
1298 {
1299   transition(itos, itos);
1300   // explicitly check for div0
1301   Label no_div0;
1302   __ cbnzw(r0, no_div0);
1303   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1304   __ br(rscratch1);
1305   __ bind(no_div0);
1306   __ pop_i(r1);
1307   // r0 <== r1 irem r0
1308   __ corrected_idivl(r0, r1, r0, /* want_remainder */ true);
1309 }
1310 
1311 void TemplateTable::lmul()
1312 {
1313   transition(ltos, ltos);
1314   __ pop_l(r1);
1315   __ mul(r0, r0, r1);
1316 }
1317 
1318 void TemplateTable::ldiv()
1319 {
1320   transition(ltos, ltos);
1321   // explicitly check for div0
1322   Label no_div0;
1323   __ cbnz(r0, no_div0);
1324   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1325   __ br(rscratch1);
1326   __ bind(no_div0);
1327   __ pop_l(r1);
1328   // r0 <== r1 ldiv r0
1329   __ corrected_idivq(r0, r1, r0, /* want_remainder */ false);
1330 }
1331 
1332 void TemplateTable::lrem()
1333 {
1334   transition(ltos, ltos);
1335   // explicitly check for div0
1336   Label no_div0;
1337   __ cbnz(r0, no_div0);
1338   __ mov(rscratch1, Interpreter::_throw_ArithmeticException_entry);
1339   __ br(rscratch1);
1340   __ bind(no_div0);
1341   __ pop_l(r1);
1342   // r0 <== r1 lrem r0
1343   __ corrected_idivq(r0, r1, r0, /* want_remainder */ true);
1344 }
1345 
1346 void TemplateTable::lshl()
1347 {
1348   transition(itos, ltos);
1349   // shift count is in r0
1350   __ pop_l(r1);
1351   __ lslv(r0, r1, r0);
1352 }
1353 
1354 void TemplateTable::lshr()
1355 {
1356   transition(itos, ltos);
1357   // shift count is in r0
1358   __ pop_l(r1);
1359   __ asrv(r0, r1, r0);
1360 }
1361 
1362 void TemplateTable::lushr()
1363 {
1364   transition(itos, ltos);
1365   // shift count is in r0
1366   __ pop_l(r1);
1367   __ lsrv(r0, r1, r0);
1368 }
1369 
1370 void TemplateTable::fop2(Operation op)
1371 {
1372   transition(ftos, ftos);
1373   switch (op) {
1374   case add:
1375     // n.b. use ldrd because this is a 64 bit slot
1376     __ pop_f(v1);
1377     __ fadds(v0, v1, v0);
1378     break;
1379   case sub:
1380     __ pop_f(v1);
1381     __ fsubs(v0, v1, v0);
1382     break;
1383   case mul:
1384     __ pop_f(v1);
1385     __ fmuls(v0, v1, v0);
1386     break;
1387   case div:
1388     __ pop_f(v1);
1389     __ fdivs(v0, v1, v0);
1390     break;
1391   case rem:
1392     __ fmovs(v1, v0);
1393     __ pop_f(v0);
1394     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::frem),
1395                          0, 2, MacroAssembler::ret_type_float);
1396     break;
1397   default:
1398     ShouldNotReachHere();
1399     break;
1400   }
1401 }
1402 
1403 void TemplateTable::dop2(Operation op)
1404 {
1405   transition(dtos, dtos);
1406   switch (op) {
1407   case add:
1408     // n.b. use ldrd because this is a 64 bit slot
1409     __ pop_d(v1);
1410     __ faddd(v0, v1, v0);
1411     break;
1412   case sub:
1413     __ pop_d(v1);
1414     __ fsubd(v0, v1, v0);
1415     break;
1416   case mul:
1417     __ pop_d(v1);
1418     __ fmuld(v0, v1, v0);
1419     break;
1420   case div:
1421     __ pop_d(v1);
1422     __ fdivd(v0, v1, v0);
1423     break;
1424   case rem:
1425     __ fmovd(v1, v0);
1426     __ pop_d(v0);
1427     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::drem),
1428                          0, 2, MacroAssembler::ret_type_double);
1429     break;
1430   default:
1431     ShouldNotReachHere();
1432     break;
1433   }
1434 }
1435 
1436 void TemplateTable::ineg()
1437 {
1438   transition(itos, itos);
1439   __ negw(r0, r0);
1440 
1441 }
1442 
1443 void TemplateTable::lneg()
1444 {
1445   transition(ltos, ltos);
1446   __ neg(r0, r0);
1447 }
1448 
1449 void TemplateTable::fneg()
1450 {
1451   transition(ftos, ftos);
1452   __ fnegs(v0, v0);
1453 }
1454 
1455 void TemplateTable::dneg()
1456 {
1457   transition(dtos, dtos);
1458   __ fnegd(v0, v0);
1459 }
1460 
1461 void TemplateTable::iinc()
1462 {
1463   transition(vtos, vtos);
1464   __ load_signed_byte(r1, at_bcp(2)); // get constant
1465   locals_index(r2);
1466   __ ldr(r0, iaddress(r2));
1467   __ addw(r0, r0, r1);
1468   __ str(r0, iaddress(r2));
1469 }
1470 
1471 void TemplateTable::wide_iinc()
1472 {
1473   transition(vtos, vtos);
1474   // __ mov(r1, zr);
1475   __ ldrw(r1, at_bcp(2)); // get constant and index
1476   __ rev16(r1, r1);
1477   __ ubfx(r2, r1, 0, 16);
1478   __ neg(r2, r2);
1479   __ sbfx(r1, r1, 16, 16);
1480   __ ldr(r0, iaddress(r2));
1481   __ addw(r0, r0, r1);
1482   __ str(r0, iaddress(r2));
1483 }
1484 
1485 void TemplateTable::convert()
1486 {
1487   // Checking
1488 #ifdef ASSERT
1489   {
1490     TosState tos_in  = ilgl;
1491     TosState tos_out = ilgl;
1492     switch (bytecode()) {
1493     case Bytecodes::_i2l: // fall through
1494     case Bytecodes::_i2f: // fall through
1495     case Bytecodes::_i2d: // fall through
1496     case Bytecodes::_i2b: // fall through
1497     case Bytecodes::_i2c: // fall through
1498     case Bytecodes::_i2s: tos_in = itos; break;
1499     case Bytecodes::_l2i: // fall through
1500     case Bytecodes::_l2f: // fall through
1501     case Bytecodes::_l2d: tos_in = ltos; break;
1502     case Bytecodes::_f2i: // fall through
1503     case Bytecodes::_f2l: // fall through
1504     case Bytecodes::_f2d: tos_in = ftos; break;
1505     case Bytecodes::_d2i: // fall through
1506     case Bytecodes::_d2l: // fall through
1507     case Bytecodes::_d2f: tos_in = dtos; break;
1508     default             : ShouldNotReachHere();
1509     }
1510     switch (bytecode()) {
1511     case Bytecodes::_l2i: // fall through
1512     case Bytecodes::_f2i: // fall through
1513     case Bytecodes::_d2i: // fall through
1514     case Bytecodes::_i2b: // fall through
1515     case Bytecodes::_i2c: // fall through
1516     case Bytecodes::_i2s: tos_out = itos; break;
1517     case Bytecodes::_i2l: // fall through
1518     case Bytecodes::_f2l: // fall through
1519     case Bytecodes::_d2l: tos_out = ltos; break;
1520     case Bytecodes::_i2f: // fall through
1521     case Bytecodes::_l2f: // fall through
1522     case Bytecodes::_d2f: tos_out = ftos; break;
1523     case Bytecodes::_i2d: // fall through
1524     case Bytecodes::_l2d: // fall through
1525     case Bytecodes::_f2d: tos_out = dtos; break;
1526     default             : ShouldNotReachHere();
1527     }
1528     transition(tos_in, tos_out);
1529   }
1530 #endif // ASSERT
1531   // static const int64_t is_nan = 0x8000000000000000L;
1532 
1533   // Conversion
1534   switch (bytecode()) {
1535   case Bytecodes::_i2l:
1536     __ sxtw(r0, r0);
1537     break;
1538   case Bytecodes::_i2f:
1539     __ scvtfws(v0, r0);
1540     break;
1541   case Bytecodes::_i2d:
1542     __ scvtfwd(v0, r0);
1543     break;
1544   case Bytecodes::_i2b:
1545     __ sxtbw(r0, r0);
1546     break;
1547   case Bytecodes::_i2c:
1548     __ uxthw(r0, r0);
1549     break;
1550   case Bytecodes::_i2s:
1551     __ sxthw(r0, r0);
1552     break;
1553   case Bytecodes::_l2i:
1554     __ uxtw(r0, r0);
1555     break;
1556   case Bytecodes::_l2f:
1557     __ scvtfs(v0, r0);
1558     break;
1559   case Bytecodes::_l2d:
1560     __ scvtfd(v0, r0);
1561     break;
1562   case Bytecodes::_f2i:
1563   {
1564     Label L_Okay;
1565     __ clear_fpsr();
1566     __ fcvtzsw(r0, v0);
1567     __ get_fpsr(r1);
1568     __ cbzw(r1, L_Okay);
1569     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::f2i),
1570                          0, 1, MacroAssembler::ret_type_integral);
1571     __ bind(L_Okay);
1572   }
1573     break;
1574   case Bytecodes::_f2l:
1575   {
1576     Label L_Okay;
1577     __ clear_fpsr();
1578     __ fcvtzs(r0, v0);
1579     __ get_fpsr(r1);
1580     __ cbzw(r1, L_Okay);
1581     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::f2l),
1582                          0, 1, MacroAssembler::ret_type_integral);
1583     __ bind(L_Okay);
1584   }
1585     break;
1586   case Bytecodes::_f2d:
1587     __ fcvts(v0, v0);
1588     break;
1589   case Bytecodes::_d2i:
1590   {
1591     Label L_Okay;
1592     __ clear_fpsr();
1593     __ fcvtzdw(r0, v0);
1594     __ get_fpsr(r1);
1595     __ cbzw(r1, L_Okay);
1596     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::d2i),
1597                          0, 1, MacroAssembler::ret_type_integral);
1598     __ bind(L_Okay);
1599   }
1600     break;
1601   case Bytecodes::_d2l:
1602   {
1603     Label L_Okay;
1604     __ clear_fpsr();
1605     __ fcvtzd(r0, v0);
1606     __ get_fpsr(r1);
1607     __ cbzw(r1, L_Okay);
1608     __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::d2l),
1609                          0, 1, MacroAssembler::ret_type_integral);
1610     __ bind(L_Okay);
1611   }
1612     break;
1613   case Bytecodes::_d2f:
1614     __ fcvtd(v0, v0);
1615     break;
1616   default:
1617     ShouldNotReachHere();
1618   }
1619 }
1620 
1621 void TemplateTable::lcmp()
1622 {
1623   transition(ltos, itos);
1624   Label done;
1625   __ pop_l(r1);
1626   __ cmp(r1, r0);
1627   __ mov(r0, (u_int64_t)-1L);
1628   __ br(Assembler::LT, done);
1629   // __ mov(r0, 1UL);
1630   // __ csel(r0, r0, zr, Assembler::NE);
1631   // and here is a faster way
1632   __ csinc(r0, zr, zr, Assembler::EQ);
1633   __ bind(done);
1634 }
1635 
1636 void TemplateTable::float_cmp(bool is_float, int unordered_result)
1637 {
1638   Label done;
1639   if (is_float) {
1640     // XXX get rid of pop here, use ... reg, mem32
1641     __ pop_f(v1);
1642     __ fcmps(v1, v0);
1643   } else {
1644     // XXX get rid of pop here, use ... reg, mem64
1645     __ pop_d(v1);
1646     __ fcmpd(v1, v0);
1647   }
1648   if (unordered_result < 0) {
1649     // we want -1 for unordered or less than, 0 for equal and 1 for
1650     // greater than.
1651     __ mov(r0, (u_int64_t)-1L);
1652     // for FP LT tests less than or unordered
1653     __ br(Assembler::LT, done);
1654     // install 0 for EQ otherwise 1
1655     __ csinc(r0, zr, zr, Assembler::EQ);
1656   } else {
1657     // we want -1 for less than, 0 for equal and 1 for unordered or
1658     // greater than.
1659     __ mov(r0, 1L);
1660     // for FP HI tests greater than or unordered
1661     __ br(Assembler::HI, done);
1662     // install 0 for EQ otherwise ~0
1663     __ csinv(r0, zr, zr, Assembler::EQ);
1664 
1665   }
1666   __ bind(done);
1667 }
1668 
1669 void TemplateTable::branch(bool is_jsr, bool is_wide)
1670 {
1671   // We might be moving to a safepoint.  The thread which calls
1672   // Interpreter::notice_safepoints() will effectively flush its cache
1673   // when it makes a system call, but we need to do something to
1674   // ensure that we see the changed dispatch table.
1675   __ membar(MacroAssembler::LoadLoad);
1676 
1677   __ profile_taken_branch(r0, r1);
1678   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1679                              InvocationCounter::counter_offset();
1680   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1681                               InvocationCounter::counter_offset();
1682 
1683   // load branch displacement
1684   if (!is_wide) {
1685     __ ldrh(r2, at_bcp(1));
1686     __ rev16(r2, r2);
1687     // sign extend the 16 bit value in r2
1688     __ sbfm(r2, r2, 0, 15);
1689   } else {
1690     __ ldrw(r2, at_bcp(1));
1691     __ revw(r2, r2);
1692     // sign extend the 32 bit value in r2
1693     __ sbfm(r2, r2, 0, 31);
1694   }
1695 
1696   // Handle all the JSR stuff here, then exit.
1697   // It's much shorter and cleaner than intermingling with the non-JSR
1698   // normal-branch stuff occurring below.
1699 
1700   if (is_jsr) {
1701     // Pre-load the next target bytecode into rscratch1
1702     __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
1703     // compute return address as bci
1704     __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
1705     __ add(rscratch2, rscratch2,
1706            in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
1707     __ sub(r1, rbcp, rscratch2);
1708     __ push_i(r1);
1709     // Adjust the bcp by the 16-bit displacement in r2
1710     __ add(rbcp, rbcp, r2);
1711     __ dispatch_only(vtos);
1712     return;
1713   }
1714 
1715   // Normal (non-jsr) branch handling
1716 
1717   // Adjust the bcp by the displacement in r2
1718   __ add(rbcp, rbcp, r2);
1719 
1720   assert(UseLoopCounter || !UseOnStackReplacement,
1721          "on-stack-replacement requires loop counters");
1722   Label backedge_counter_overflow;
1723   Label profile_method;
1724   Label dispatch;
1725   if (UseLoopCounter) {
1726     // increment backedge counter for backward branches
1727     // r0: MDO
1728     // w1: MDO bumped taken-count
1729     // r2: target offset
1730     __ cmp(r2, zr);
1731     __ br(Assembler::GT, dispatch); // count only if backward branch
1732 
1733     // ECN: FIXME: This code smells
1734     // check if MethodCounters exists
1735     Label has_counters;
1736     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1737     __ cbnz(rscratch1, has_counters);
1738     __ push(r0);
1739     __ push(r1);
1740     __ push(r2);
1741     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
1742             InterpreterRuntime::build_method_counters), rmethod);
1743     __ pop(r2);
1744     __ pop(r1);
1745     __ pop(r0);
1746     __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1747     __ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
1748     __ bind(has_counters);
1749 
1750     if (TieredCompilation) {
1751       Label no_mdo;
1752       int increment = InvocationCounter::count_increment;
1753       if (ProfileInterpreter) {
1754         // Are we profiling?
1755         __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1756         __ cbz(r1, no_mdo);
1757         // Increment the MDO backedge counter
1758         const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
1759                                            in_bytes(InvocationCounter::counter_offset()));
1760         const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
1761         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1762                                    r0, rscratch1, false, Assembler::EQ, &backedge_counter_overflow);
1763         __ b(dispatch);
1764       }
1765       __ bind(no_mdo);
1766       // Increment backedge counter in MethodCounters*
1767       __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
1768       const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
1769       __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
1770                                  r0, rscratch2, false, Assembler::EQ, &backedge_counter_overflow);
1771     } else { // not TieredCompilation
1772       // increment counter
1773       __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
1774       __ ldrw(r0, Address(rscratch2, be_offset));        // load backedge counter
1775       __ addw(rscratch1, r0, InvocationCounter::count_increment); // increment counter
1776       __ strw(rscratch1, Address(rscratch2, be_offset));        // store counter
1777 
1778       __ ldrw(r0, Address(rscratch2, inv_offset));    // load invocation counter
1779       __ andw(r0, r0, (unsigned)InvocationCounter::count_mask_value); // and the status bits
1780       __ addw(r0, r0, rscratch1);        // add both counters
1781 
1782       if (ProfileInterpreter) {
1783         // Test to see if we should create a method data oop
1784         __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
1785         __ cmpw(r0, rscratch1);
1786         __ br(Assembler::LT, dispatch);
1787 
1788         // if no method data exists, go to profile method
1789         __ test_method_data_pointer(r0, profile_method);
1790 
1791         if (UseOnStackReplacement) {
1792           // check for overflow against w1 which is the MDO taken count
1793           __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
1794           __ cmpw(r1, rscratch1);
1795           __ br(Assembler::LO, dispatch); // Intel == Assembler::below
1796 
1797           // When ProfileInterpreter is on, the backedge_count comes
1798           // from the MethodData*, which value does not get reset on
1799           // the call to frequency_counter_overflow().  To avoid
1800           // excessive calls to the overflow routine while the method is
1801           // being compiled, add a second test to make sure the overflow
1802           // function is called only once every overflow_frequency.
1803           const int overflow_frequency = 1024;
1804           __ andsw(r1, r1, overflow_frequency - 1);
1805           __ br(Assembler::EQ, backedge_counter_overflow);
1806 
1807         }
1808       } else {
1809         if (UseOnStackReplacement) {
1810           // check for overflow against w0, which is the sum of the
1811           // counters
1812           __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
1813           __ cmpw(r0, rscratch1);
1814           __ br(Assembler::HS, backedge_counter_overflow); // Intel == Assembler::aboveEqual
1815         }
1816       }
1817     }
1818   }
1819   __ bind(dispatch);
1820 
1821   // Pre-load the next target bytecode into rscratch1
1822   __ load_unsigned_byte(rscratch1, Address(rbcp, 0));
1823 
1824   // continue with the bytecode @ target
1825   // rscratch1: target bytecode
1826   // rbcp: target bcp
1827   __ dispatch_only(vtos);
1828 
1829   if (UseLoopCounter) {
1830     if (ProfileInterpreter) {
1831       // Out-of-line code to allocate method data oop.
1832       __ bind(profile_method);
1833       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1834       __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1835       __ set_method_data_pointer_for_bcp();
1836       __ b(dispatch);
1837     }
1838 
1839     if (TieredCompilation || UseOnStackReplacement) {
1840       // invocation counter overflow
1841       __ bind(backedge_counter_overflow);
1842       __ neg(r2, r2);
1843       __ add(r2, r2, rbcp);     // branch bcp
1844       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1845       __ call_VM(noreg,
1846                  CAST_FROM_FN_PTR(address,
1847                                   InterpreterRuntime::frequency_counter_overflow),
1848                  r2);
1849       if (!UseOnStackReplacement)
1850         __ b(dispatch);
1851     }
1852 
1853     if (UseOnStackReplacement) {
1854       __ load_unsigned_byte(r1, Address(rbcp, 0));  // restore target bytecode
1855 
1856       // r0: osr nmethod (osr ok) or NULL (osr not possible)
1857       // w1: target bytecode
1858       // r2: scratch
1859       __ cbz(r0, dispatch);     // test result -- no osr if null
1860       // nmethod may have been invalidated (VM may block upon call_VM return)
1861       __ ldrb(r2, Address(r0, nmethod::state_offset()));
1862       if (nmethod::in_use != 0)
1863         __ sub(r2, r2, nmethod::in_use);
1864       __ cbnz(r2, dispatch);
1865 
1866       // We have the address of an on stack replacement routine in r0
1867       // We need to prepare to execute the OSR method. First we must
1868       // migrate the locals and monitors off of the stack.
1869 
1870       __ mov(r19, r0);                             // save the nmethod
1871 
1872       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1873 
1874       // r0 is OSR buffer, move it to expected parameter location
1875       __ mov(j_rarg0, r0);
1876 
1877       // remove activation
1878       // get sender esp
1879       __ ldr(esp,
1880           Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
1881       // remove frame anchor
1882       __ leave();
1883       // Ensure compiled code always sees stack at proper alignment
1884       __ andr(sp, esp, -16);
1885 
1886       // and begin the OSR nmethod
1887       __ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
1888       __ br(rscratch1);
1889     }
1890   }
1891 }
1892 
1893 
1894 void TemplateTable::if_0cmp(Condition cc)
1895 {
1896   transition(itos, vtos);
1897   // assume branch is more often taken than not (loops use backward branches)
1898   Label not_taken;
1899   if (cc == equal)
1900     __ cbnzw(r0, not_taken);
1901   else if (cc == not_equal)
1902     __ cbzw(r0, not_taken);
1903   else {
1904     __ andsw(zr, r0, r0);
1905     __ br(j_not(cc), not_taken);
1906   }
1907 
1908   branch(false, false);
1909   __ bind(not_taken);
1910   __ profile_not_taken_branch(r0);
1911 }
1912 
1913 void TemplateTable::if_icmp(Condition cc)
1914 {
1915   transition(itos, vtos);
1916   // assume branch is more often taken than not (loops use backward branches)
1917   Label not_taken;
1918   __ pop_i(r1);
1919   __ cmpw(r1, r0, Assembler::LSL);
1920   __ br(j_not(cc), not_taken);
1921   branch(false, false);
1922   __ bind(not_taken);
1923   __ profile_not_taken_branch(r0);
1924 }
1925 
1926 void TemplateTable::if_nullcmp(Condition cc)
1927 {
1928   transition(atos, vtos);
1929   // assume branch is more often taken than not (loops use backward branches)
1930   Label not_taken;
1931   if (cc == equal)
1932     __ cbnz(r0, not_taken);
1933   else
1934     __ cbz(r0, not_taken);
1935   branch(false, false);
1936   __ bind(not_taken);
1937   __ profile_not_taken_branch(r0);
1938 }
1939 
1940 void TemplateTable::if_acmp(Condition cc)
1941 {
1942   transition(atos, vtos);
1943   // assume branch is more often taken than not (loops use backward branches)
1944   Label not_taken;
1945   __ pop_ptr(r1);
1946   __ cmp(r1, r0);
1947   __ br(j_not(cc), not_taken);
1948   branch(false, false);
1949   __ bind(not_taken);
1950   __ profile_not_taken_branch(r0);
1951 }
1952 
1953 void TemplateTable::ret() {
1954   transition(vtos, vtos);
1955   // We might be moving to a safepoint.  The thread which calls
1956   // Interpreter::notice_safepoints() will effectively flush its cache
1957   // when it makes a system call, but we need to do something to
1958   // ensure that we see the changed dispatch table.
1959   __ membar(MacroAssembler::LoadLoad);
1960 
1961   locals_index(r1);
1962   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1963   __ profile_ret(r1, r2);
1964   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1965   __ lea(rbcp, Address(rbcp, r1));
1966   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1967   __ dispatch_next(vtos);
1968 }
1969 
1970 void TemplateTable::wide_ret() {
1971   transition(vtos, vtos);
1972   locals_index_wide(r1);
1973   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
1974   __ profile_ret(r1, r2);
1975   __ ldr(rbcp, Address(rmethod, Method::const_offset()));
1976   __ lea(rbcp, Address(rbcp, r1));
1977   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
1978   __ dispatch_next(vtos);
1979 }
1980 
1981 
1982 void TemplateTable::tableswitch() {
1983   Label default_case, continue_execution;
1984   transition(itos, vtos);
1985   // align rbcp
1986   __ lea(r1, at_bcp(BytesPerInt));
1987   __ andr(r1, r1, -BytesPerInt);
1988   // load lo & hi
1989   __ ldrw(r2, Address(r1, BytesPerInt));
1990   __ ldrw(r3, Address(r1, 2 * BytesPerInt));
1991   __ rev32(r2, r2);
1992   __ rev32(r3, r3);
1993   // check against lo & hi
1994   __ cmpw(r0, r2);
1995   __ br(Assembler::LT, default_case);
1996   __ cmpw(r0, r3);
1997   __ br(Assembler::GT, default_case);
1998   // lookup dispatch offset
1999   __ subw(r0, r0, r2);
2000   __ lea(r3, Address(r1, r0, Address::uxtw(2)));
2001   __ ldrw(r3, Address(r3, 3 * BytesPerInt));
2002   __ profile_switch_case(r0, r1, r2);
2003   // continue execution
2004   __ bind(continue_execution);
2005   __ rev32(r3, r3);
2006   __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
2007   __ add(rbcp, rbcp, r3, ext::sxtw);
2008   __ dispatch_only(vtos);
2009   // handle default
2010   __ bind(default_case);
2011   __ profile_switch_default(r0);
2012   __ ldrw(r3, Address(r1, 0));
2013   __ b(continue_execution);
2014 }
2015 
2016 void TemplateTable::lookupswitch() {
2017   transition(itos, itos);
2018   __ stop("lookupswitch bytecode should have been rewritten");
2019 }
2020 
2021 void TemplateTable::fast_linearswitch() {
2022   transition(itos, vtos);
2023   Label loop_entry, loop, found, continue_execution;
2024   // bswap r0 so we can avoid bswapping the table entries
2025   __ rev32(r0, r0);
2026   // align rbcp
2027   __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2028                                     // this instruction (change offsets
2029                                     // below)
2030   __ andr(r19, r19, -BytesPerInt);
2031   // set counter
2032   __ ldrw(r1, Address(r19, BytesPerInt));
2033   __ rev32(r1, r1);
2034   __ b(loop_entry);
2035   // table search
2036   __ bind(loop);
2037   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2038   __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
2039   __ cmpw(r0, rscratch1);
2040   __ br(Assembler::EQ, found);
2041   __ bind(loop_entry);
2042   __ subs(r1, r1, 1);
2043   __ br(Assembler::PL, loop);
2044   // default case
2045   __ profile_switch_default(r0);
2046   __ ldrw(r3, Address(r19, 0));
2047   __ b(continue_execution);
2048   // entry found -> get offset
2049   __ bind(found);
2050   __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
2051   __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
2052   __ profile_switch_case(r1, r0, r19);
2053   // continue execution
2054   __ bind(continue_execution);
2055   __ rev32(r3, r3);
2056   __ add(rbcp, rbcp, r3, ext::sxtw);
2057   __ ldrb(rscratch1, Address(rbcp, 0));
2058   __ dispatch_only(vtos);
2059 }
2060 
2061 void TemplateTable::fast_binaryswitch() {
2062   transition(itos, vtos);
2063   // Implementation using the following core algorithm:
2064   //
2065   // int binary_search(int key, LookupswitchPair* array, int n) {
2066   //   // Binary search according to "Methodik des Programmierens" by
2067   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2068   //   int i = 0;
2069   //   int j = n;
2070   //   while (i+1 < j) {
2071   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2072   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2073   //     // where a stands for the array and assuming that the (inexisting)
2074   //     // element a[n] is infinitely big.
2075   //     int h = (i + j) >> 1;
2076   //     // i < h < j
2077   //     if (key < array[h].fast_match()) {
2078   //       j = h;
2079   //     } else {
2080   //       i = h;
2081   //     }
2082   //   }
2083   //   // R: a[i] <= key < a[i+1] or Q
2084   //   // (i.e., if key is within array, i is the correct index)
2085   //   return i;
2086   // }
2087 
2088   // Register allocation
2089   const Register key   = r0; // already set (tosca)
2090   const Register array = r1;
2091   const Register i     = r2;
2092   const Register j     = r3;
2093   const Register h     = rscratch1;
2094   const Register temp  = rscratch2;
2095 
2096   // Find array start
2097   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2098                                           // get rid of this
2099                                           // instruction (change
2100                                           // offsets below)
2101   __ andr(array, array, -BytesPerInt);
2102 
2103   // Initialize i & j
2104   __ mov(i, 0);                            // i = 0;
2105   __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);
2106 
2107   // Convert j into native byteordering
2108   __ rev32(j, j);
2109 
2110   // And start
2111   Label entry;
2112   __ b(entry);
2113 
2114   // binary search loop
2115   {
2116     Label loop;
2117     __ bind(loop);
2118     // int h = (i + j) >> 1;
2119     __ addw(h, i, j);                           // h = i + j;
2120     __ lsrw(h, h, 1);                                   // h = (i + j) >> 1;
2121     // if (key < array[h].fast_match()) {
2122     //   j = h;
2123     // } else {
2124     //   i = h;
2125     // }
2126     // Convert array[h].match to native byte-ordering before compare
2127     __ ldr(temp, Address(array, h, Address::lsl(3)));
2128     __ rev32(temp, temp);
2129     __ cmpw(key, temp);
2130     // j = h if (key <  array[h].fast_match())
2131     __ csel(j, h, j, Assembler::LT);
2132     // i = h if (key >= array[h].fast_match())
2133     __ csel(i, h, i, Assembler::GE);
2134     // while (i+1 < j)
2135     __ bind(entry);
2136     __ addw(h, i, 1);          // i+1
2137     __ cmpw(h, j);             // i+1 < j
2138     __ br(Assembler::LT, loop);
2139   }
2140 
2141   // end of binary search, result index is i (must check again!)
2142   Label default_case;
2143   // Convert array[i].match to native byte-ordering before compare
2144   __ ldr(temp, Address(array, i, Address::lsl(3)));
2145   __ rev32(temp, temp);
2146   __ cmpw(key, temp);
2147   __ br(Assembler::NE, default_case);
2148 
2149   // entry found -> j = offset
2150   __ add(j, array, i, ext::uxtx, 3);
2151   __ ldrw(j, Address(j, BytesPerInt));
2152   __ profile_switch_case(i, key, array);
2153   __ rev32(j, j);
2154   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2155   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2156   __ dispatch_only(vtos);
2157 
2158   // default case -> j = default offset
2159   __ bind(default_case);
2160   __ profile_switch_default(i);
2161   __ ldrw(j, Address(array, -2 * BytesPerInt));
2162   __ rev32(j, j);
2163   __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
2164   __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
2165   __ dispatch_only(vtos);
2166 }
2167 
2168 
2169 void TemplateTable::_return(TosState state)
2170 {
2171   transition(state, state);
2172   assert(_desc->calls_vm(),
2173          "inconsistent calls_vm information"); // call in remove_activation
2174 
2175   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2176     assert(state == vtos, "only valid state");
2177 
2178     __ ldr(c_rarg1, aaddress(0));
2179     __ load_klass(r3, c_rarg1);
2180     __ ldrw(r3, Address(r3, Klass::access_flags_offset()));
2181     __ tst(r3, JVM_ACC_HAS_FINALIZER);
2182     Label skip_register_finalizer;
2183     __ br(Assembler::EQ, skip_register_finalizer);
2184 
2185     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2186 
2187     __ bind(skip_register_finalizer);
2188   }
2189 
2190   // Issue a StoreStore barrier after all stores but before return
2191   // from any constructor for any class with a final field.  We don't
2192   // know if this is a finalizer, so we always do so.
2193   if (_desc->bytecode() == Bytecodes::_return)
2194     __ membar(MacroAssembler::StoreStore);
2195 
2196   __ remove_activation(state);
2197   __ ret(lr);
2198 }
2199 
2200 // ----------------------------------------------------------------------------
2201 // Volatile variables demand their effects be made known to all CPU's
2202 // in order.  Store buffers on most chips allow reads & writes to
2203 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2204 // without some kind of memory barrier (i.e., it's not sufficient that
2205 // the interpreter does not reorder volatile references, the hardware
2206 // also must not reorder them).
2207 //
2208 // According to the new Java Memory Model (JMM):
2209 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
2210 //     writes act as aquire & release, so:
2211 // (2) A read cannot let unrelated NON-volatile memory refs that
2212 //     happen after the read float up to before the read.  It's OK for
2213 //     non-volatile memory refs that happen before the volatile read to
2214 //     float down below it.
2215 // (3) Similar a volatile write cannot let unrelated NON-volatile
2216 //     memory refs that happen BEFORE the write float down to after the
2217 //     write.  It's OK for non-volatile memory refs that happen after the
2218 //     volatile write to float up before it.
2219 //
2220 // We only put in barriers around volatile refs (they are expensive),
2221 // not _between_ memory refs (that would require us to track the
2222 // flavor of the previous memory refs).  Requirements (2) and (3)
2223 // require some barriers before volatile stores and after volatile
2224 // loads.  These nearly cover requirement (1) but miss the
2225 // volatile-store-volatile-load case.  This final case is placed after
2226 // volatile-stores although it could just as well go before
2227 // volatile-loads.
2228 
2229 void TemplateTable::resolve_cache_and_index(int byte_no,
2230                                             Register Rcache,
2231                                             Register index,
2232                                             size_t index_size) {
2233   const Register temp = r19;
2234   assert_different_registers(Rcache, index, temp);
2235 
2236   Label resolved;
2237 
2238   Bytecodes::Code code = bytecode();
2239   switch (code) {
2240   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2241   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2242   }
2243 
2244   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2245   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2246   __ cmp(temp, (int) code);  // have we resolved this bytecode?
2247   __ br(Assembler::EQ, resolved);
2248 
2249   // resolve first time through
2250   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2251   __ mov(temp, (int) code);
2252   __ call_VM(noreg, entry, temp);
2253 
2254   // Update registers with resolved info
2255   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2256   // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
2257   // so all clients ofthis method must be modified accordingly
2258   __ bind(resolved);
2259 }
2260 
2261 // The Rcache and index registers must be set before call
2262 // n.b unlike x86 cache already includes the index offset
2263 void TemplateTable::load_field_cp_cache_entry(Register obj,
2264                                               Register cache,
2265                                               Register index,
2266                                               Register off,
2267                                               Register flags,
2268                                               bool is_static = false) {
2269   assert_different_registers(cache, index, flags, off);
2270 
2271   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2272   // Field offset
2273   __ ldr(off, Address(cache, in_bytes(cp_base_offset +
2274                                           ConstantPoolCacheEntry::f2_offset())));
2275   // Flags
2276   __ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
2277                                            ConstantPoolCacheEntry::flags_offset())));
2278 
2279   // klass overwrite register
2280   if (is_static) {
2281     __ ldr(obj, Address(cache, in_bytes(cp_base_offset +
2282                                         ConstantPoolCacheEntry::f1_offset())));
2283     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2284     __ ldr(obj, Address(obj, mirror_offset));
2285   }
2286 }
2287 
2288 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2289                                                Register method,
2290                                                Register itable_index,
2291                                                Register flags,
2292                                                bool is_invokevirtual,
2293                                                bool is_invokevfinal, /*unused*/
2294                                                bool is_invokedynamic) {
2295   // setup registers
2296   const Register cache = rscratch2;
2297   const Register index = r4;
2298   assert_different_registers(method, flags);
2299   assert_different_registers(method, cache, index);
2300   assert_different_registers(itable_index, flags);
2301   assert_different_registers(itable_index, cache, index);
2302   // determine constant pool cache field offsets
2303   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2304   const int method_offset = in_bytes(
2305     ConstantPoolCache::base_offset() +
2306       (is_invokevirtual
2307        ? ConstantPoolCacheEntry::f2_offset()
2308        : ConstantPoolCacheEntry::f1_offset()));
2309   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2310                                     ConstantPoolCacheEntry::flags_offset());
2311   // access constant pool cache fields
2312   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2313                                     ConstantPoolCacheEntry::f2_offset());
2314 
2315   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2316   resolve_cache_and_index(byte_no, cache, index, index_size);
2317   __ ldr(method, Address(cache, method_offset));
2318 
2319   if (itable_index != noreg) {
2320     __ ldr(itable_index, Address(cache, index_offset));
2321   }
2322   __ ldrw(flags, Address(cache, flags_offset));
2323 }
2324 
2325 
2326 // The registers cache and index expected to be set before call.
2327 // Correct values of the cache and index registers are preserved.
2328 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2329                                             bool is_static, bool has_tos) {
2330   // do the JVMTI work here to avoid disturbing the register state below
2331   // We use c_rarg registers here because we want to use the register used in
2332   // the call to the VM
2333   if (JvmtiExport::can_post_field_access()) {
2334     // Check to see if a field access watch has been set before we
2335     // take the time to call into the VM.
2336     Label L1;
2337     assert_different_registers(cache, index, r0);
2338     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2339     __ ldrw(r0, Address(rscratch1));
2340     __ cbzw(r0, L1);
2341 
2342     __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2343     __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
2344 
2345     if (is_static) {
2346       __ mov(c_rarg1, zr); // NULL object reference
2347     } else {
2348       __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
2349       __ verify_oop(c_rarg1);
2350     }
2351     // c_rarg1: object pointer or NULL
2352     // c_rarg2: cache entry pointer
2353     // c_rarg3: jvalue object on the stack
2354     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2355                                        InterpreterRuntime::post_field_access),
2356                c_rarg1, c_rarg2, c_rarg3);
2357     __ get_cache_and_index_at_bcp(cache, index, 1);
2358     __ bind(L1);
2359   }
2360 }
2361 
2362 void TemplateTable::pop_and_check_object(Register r)
2363 {
2364   __ pop_ptr(r);
2365   __ null_check(r);  // for field access must check obj.
2366   __ verify_oop(r);
2367 }
2368 
2369 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
2370 {
2371   const Register cache = r2;
2372   const Register index = r3;
2373   const Register obj   = r4;
2374   const Register off   = r19;
2375   const Register flags = r0;
2376   const Register bc    = r4; // uses same reg as obj, so don't mix them
2377 
2378   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2379   jvmti_post_field_access(cache, index, is_static, false);
2380   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2381 
2382   if (!is_static) {
2383     // obj is on the stack
2384     pop_and_check_object(obj);
2385   }
2386 
2387   const Address field(obj, off);
2388 
2389   Label Done, notByte, notInt, notShort, notChar,
2390               notLong, notFloat, notObj, notDouble;
2391 
2392   // x86 uses a shift and mask or wings it with a shift plus assert
2393   // the mask is not needed. aarch64 just uses bitfield extract
2394   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2395 
2396   assert(btos == 0, "change code, btos != 0");
2397   __ cbnz(flags, notByte);
2398 
2399   // Don't rewrite getstatic, only getfield
2400   if (is_static) rc = may_not_rewrite;
2401 
2402   // btos
2403   __ load_signed_byte(r0, field);
2404   __ push(btos);
2405   // Rewrite bytecode to be faster
2406   if (rc == may_rewrite) {
2407     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
2408   }
2409   __ b(Done);
2410 
2411   __ bind(notByte);
2412   __ cmp(flags, atos);
2413   __ br(Assembler::NE, notObj);
2414   // atos
2415   __ load_heap_oop(r0, field);
2416   __ push(atos);
2417   if (rc == may_rewrite) {
2418     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
2419   }
2420   __ b(Done);
2421 
2422   __ bind(notObj);
2423   __ cmp(flags, itos);
2424   __ br(Assembler::NE, notInt);
2425   // itos
2426   __ ldrw(r0, field);
2427   __ push(itos);
2428   // Rewrite bytecode to be faster
2429   if (rc == may_rewrite) {
2430     patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
2431   }
2432   __ b(Done);
2433 
2434   __ bind(notInt);
2435   __ cmp(flags, ctos);
2436   __ br(Assembler::NE, notChar);
2437   // ctos
2438   __ load_unsigned_short(r0, field);
2439   __ push(ctos);
2440   // Rewrite bytecode to be faster
2441   if (rc == may_rewrite) {
2442     patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
2443   }
2444   __ b(Done);
2445 
2446   __ bind(notChar);
2447   __ cmp(flags, stos);
2448   __ br(Assembler::NE, notShort);
2449   // stos
2450   __ load_signed_short(r0, field);
2451   __ push(stos);
2452   // Rewrite bytecode to be faster
2453   if (rc == may_rewrite) {
2454     patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
2455   }
2456   __ b(Done);
2457 
2458   __ bind(notShort);
2459   __ cmp(flags, ltos);
2460   __ br(Assembler::NE, notLong);
2461   // ltos
2462   __ ldr(r0, field);
2463   __ push(ltos);
2464   // Rewrite bytecode to be faster
2465   if (rc == may_rewrite) {
2466     patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
2467   }
2468   __ b(Done);
2469 
2470   __ bind(notLong);
2471   __ cmp(flags, ftos);
2472   __ br(Assembler::NE, notFloat);
2473   // ftos
2474   __ ldrs(v0, field);
2475   __ push(ftos);
2476   // Rewrite bytecode to be faster
2477   if (rc == may_rewrite) {
2478     patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
2479   }
2480   __ b(Done);
2481 
2482   __ bind(notFloat);
2483 #ifdef ASSERT
2484   __ cmp(flags, dtos);
2485   __ br(Assembler::NE, notDouble);
2486 #endif
2487   // dtos
2488   __ ldrd(v0, field);
2489   __ push(dtos);
2490   // Rewrite bytecode to be faster
2491   if (rc == may_rewrite) {
2492     patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
2493   }
2494 #ifdef ASSERT
2495   __ b(Done);
2496 
2497   __ bind(notDouble);
2498   __ stop("Bad state");
2499 #endif
2500 
2501   __ bind(Done);
2502   // It's really not worth bothering to check whether this field
2503   // really is volatile in the slow case.
2504   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2505 }
2506 
2507 
2508 void TemplateTable::getfield(int byte_no)
2509 {
2510   getfield_or_static(byte_no, false);
2511 }
2512 
2513 void TemplateTable::nofast_getfield(int byte_no) {
2514   getfield_or_static(byte_no, false, may_not_rewrite);
2515 }
2516 
2517 void TemplateTable::getstatic(int byte_no)
2518 {
2519   getfield_or_static(byte_no, true);
2520 }
2521 
2522 // The registers cache and index expected to be set before call.
2523 // The function may destroy various registers, just not the cache and index registers.
2524 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2525   transition(vtos, vtos);
2526 
2527   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2528 
2529   if (JvmtiExport::can_post_field_modification()) {
2530     // Check to see if a field modification watch has been set before
2531     // we take the time to call into the VM.
2532     Label L1;
2533     assert_different_registers(cache, index, r0);
2534     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2535     __ ldrw(r0, Address(rscratch1));
2536     __ cbz(r0, L1);
2537 
2538     __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2539 
2540     if (is_static) {
2541       // Life is simple.  Null out the object pointer.
2542       __ mov(c_rarg1, zr);
2543     } else {
2544       // Life is harder. The stack holds the value on top, followed by
2545       // the object.  We don't know the size of the value, though; it
2546       // could be one or two words depending on its type. As a result,
2547       // we must find the type to determine where the object is.
2548       __ ldrw(c_rarg3, Address(c_rarg2,
2549                                in_bytes(cp_base_offset +
2550                                         ConstantPoolCacheEntry::flags_offset())));
2551       __ lsr(c_rarg3, c_rarg3,
2552              ConstantPoolCacheEntry::tos_state_shift);
2553       ConstantPoolCacheEntry::verify_tos_state_shift();
2554       Label nope2, done, ok;
2555       __ ldr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
2556       __ cmpw(c_rarg3, ltos);
2557       __ br(Assembler::EQ, ok);
2558       __ cmpw(c_rarg3, dtos);
2559       __ br(Assembler::NE, nope2);
2560       __ bind(ok);
2561       __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2562       __ bind(nope2);
2563     }
2564     // cache entry pointer
2565     __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
2566     // object (tos)
2567     __ mov(c_rarg3, esp);
2568     // c_rarg1: object pointer set up above (NULL if static)
2569     // c_rarg2: cache entry pointer
2570     // c_rarg3: jvalue object on the stack
2571     __ call_VM(noreg,
2572                CAST_FROM_FN_PTR(address,
2573                                 InterpreterRuntime::post_field_modification),
2574                c_rarg1, c_rarg2, c_rarg3);
2575     __ get_cache_and_index_at_bcp(cache, index, 1);
2576     __ bind(L1);
2577   }
2578 }
2579 
2580 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2581   transition(vtos, vtos);
2582 
2583   const Register cache = r2;
2584   const Register index = r3;
2585   const Register obj   = r2;
2586   const Register off   = r19;
2587   const Register flags = r0;
2588   const Register bc    = r4;
2589 
2590   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2591   jvmti_post_field_mod(cache, index, is_static);
2592   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2593 
2594   Label Done;
2595   __ mov(r5, flags);
2596 
2597   {
2598     Label notVolatile;
2599     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2600     __ membar(MacroAssembler::StoreStore);
2601     __ bind(notVolatile);
2602   }
2603 
2604   // field address
2605   const Address field(obj, off);
2606 
2607   Label notByte, notInt, notShort, notChar,
2608         notLong, notFloat, notObj, notDouble;
2609 
2610   // x86 uses a shift and mask or wings it with a shift plus assert
2611   // the mask is not needed. aarch64 just uses bitfield extract
2612   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
2613 
2614   assert(btos == 0, "change code, btos != 0");
2615   __ cbnz(flags, notByte);
2616 
2617   // Don't rewrite putstatic, only putfield
2618   if (is_static) rc = may_not_rewrite;
2619 
2620   // btos
2621   {
2622     __ pop(btos);
2623     if (!is_static) pop_and_check_object(obj);
2624     __ strb(r0, field);
2625     if (rc == may_rewrite) {
2626       patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
2627     }
2628     __ b(Done);
2629   }
2630 
2631   __ bind(notByte);
2632   __ cmp(flags, atos);
2633   __ br(Assembler::NE, notObj);
2634 
2635   // atos
2636   {
2637     __ pop(atos);
2638     if (!is_static) pop_and_check_object(obj);
2639     // Store into the field
2640     do_oop_store(_masm, field, r0, _bs->kind(), false);
2641     if (rc == may_rewrite) {
2642       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
2643     }
2644     __ b(Done);
2645   }
2646 
2647   __ bind(notObj);
2648   __ cmp(flags, itos);
2649   __ br(Assembler::NE, notInt);
2650 
2651   // itos
2652   {
2653     __ pop(itos);
2654     if (!is_static) pop_and_check_object(obj);
2655     __ strw(r0, field);
2656     if (rc == may_rewrite) {
2657       patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
2658     }
2659     __ b(Done);
2660   }
2661 
2662   __ bind(notInt);
2663   __ cmp(flags, ctos);
2664   __ br(Assembler::NE, notChar);
2665 
2666   // ctos
2667   {
2668     __ pop(ctos);
2669     if (!is_static) pop_and_check_object(obj);
2670     __ strh(r0, field);
2671     if (rc == may_rewrite) {
2672       patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
2673     }
2674     __ b(Done);
2675   }
2676 
2677   __ bind(notChar);
2678   __ cmp(flags, stos);
2679   __ br(Assembler::NE, notShort);
2680 
2681   // stos
2682   {
2683     __ pop(stos);
2684     if (!is_static) pop_and_check_object(obj);
2685     __ strh(r0, field);
2686     if (rc == may_rewrite) {
2687       patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
2688     }
2689     __ b(Done);
2690   }
2691 
2692   __ bind(notShort);
2693   __ cmp(flags, ltos);
2694   __ br(Assembler::NE, notLong);
2695 
2696   // ltos
2697   {
2698     __ pop(ltos);
2699     if (!is_static) pop_and_check_object(obj);
2700     __ str(r0, field);
2701     if (rc == may_rewrite) {
2702       patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
2703     }
2704     __ b(Done);
2705   }
2706 
2707   __ bind(notLong);
2708   __ cmp(flags, ftos);
2709   __ br(Assembler::NE, notFloat);
2710 
2711   // ftos
2712   {
2713     __ pop(ftos);
2714     if (!is_static) pop_and_check_object(obj);
2715     __ strs(v0, field);
2716     if (rc == may_rewrite) {
2717       patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
2718     }
2719     __ b(Done);
2720   }
2721 
2722   __ bind(notFloat);
2723 #ifdef ASSERT
2724   __ cmp(flags, dtos);
2725   __ br(Assembler::NE, notDouble);
2726 #endif
2727 
2728   // dtos
2729   {
2730     __ pop(dtos);
2731     if (!is_static) pop_and_check_object(obj);
2732     __ strd(v0, field);
2733     if (rc == may_rewrite) {
2734       patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
2735     }
2736   }
2737 
2738 #ifdef ASSERT
2739   __ b(Done);
2740 
2741   __ bind(notDouble);
2742   __ stop("Bad state");
2743 #endif
2744 
2745   __ bind(Done);
2746 
2747   {
2748     Label notVolatile;
2749     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2750     __ membar(MacroAssembler::StoreLoad);
2751     __ bind(notVolatile);
2752   }
2753 }
2754 
2755 void TemplateTable::putfield(int byte_no)
2756 {
2757   putfield_or_static(byte_no, false);
2758 }
2759 
2760 void TemplateTable::nofast_putfield(int byte_no) {
2761   putfield_or_static(byte_no, false, may_not_rewrite);
2762 }
2763 
2764 void TemplateTable::putstatic(int byte_no) {
2765   putfield_or_static(byte_no, true);
2766 }
2767 
2768 void TemplateTable::jvmti_post_fast_field_mod()
2769 {
2770   if (JvmtiExport::can_post_field_modification()) {
2771     // Check to see if a field modification watch has been set before
2772     // we take the time to call into the VM.
2773     Label L2;
2774     __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2775     __ ldrw(c_rarg3, Address(rscratch1));
2776     __ cbzw(c_rarg3, L2);
2777     __ pop_ptr(r19);                  // copy the object pointer from tos
2778     __ verify_oop(r19);
2779     __ push_ptr(r19);                 // put the object pointer back on tos
2780     // Save tos values before call_VM() clobbers them. Since we have
2781     // to do it for every data type, we use the saved values as the
2782     // jvalue object.
2783     switch (bytecode()) {          // load values into the jvalue object
2784     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
2785     case Bytecodes::_fast_bputfield: // fall through
2786     case Bytecodes::_fast_sputfield: // fall through
2787     case Bytecodes::_fast_cputfield: // fall through
2788     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
2789     case Bytecodes::_fast_dputfield: __ push_d(); break;
2790     case Bytecodes::_fast_fputfield: __ push_f(); break;
2791     case Bytecodes::_fast_lputfield: __ push_l(r0); break;
2792 
2793     default:
2794       ShouldNotReachHere();
2795     }
2796     __ mov(c_rarg3, esp);             // points to jvalue on the stack
2797     // access constant pool cache entry
2798     __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
2799     __ verify_oop(r19);
2800     // r19: object pointer copied above
2801     // c_rarg2: cache entry pointer
2802     // c_rarg3: jvalue object on the stack
2803     __ call_VM(noreg,
2804                CAST_FROM_FN_PTR(address,
2805                                 InterpreterRuntime::post_field_modification),
2806                r19, c_rarg2, c_rarg3);
2807 
2808     switch (bytecode()) {             // restore tos values
2809     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
2810     case Bytecodes::_fast_bputfield: // fall through
2811     case Bytecodes::_fast_sputfield: // fall through
2812     case Bytecodes::_fast_cputfield: // fall through
2813     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
2814     case Bytecodes::_fast_dputfield: __ pop_d(); break;
2815     case Bytecodes::_fast_fputfield: __ pop_f(); break;
2816     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
2817     }
2818     __ bind(L2);
2819   }
2820 }
2821 
2822 void TemplateTable::fast_storefield(TosState state)
2823 {
2824   transition(state, vtos);
2825 
2826   ByteSize base = ConstantPoolCache::base_offset();
2827 
2828   jvmti_post_fast_field_mod();
2829 
2830   // access constant pool cache
2831   __ get_cache_and_index_at_bcp(r2, r1, 1);
2832 
2833   // test for volatile with r3
2834   __ ldrw(r3, Address(r2, in_bytes(base +
2835                                    ConstantPoolCacheEntry::flags_offset())));
2836 
2837   // replace index with field offset from cache entry
2838   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2839 
2840   {
2841     Label notVolatile;
2842     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2843     __ membar(MacroAssembler::StoreStore);
2844     __ bind(notVolatile);
2845   }
2846 
2847   Label notVolatile;
2848 
2849   // Get object from stack
2850   pop_and_check_object(r2);
2851 
2852   // field address
2853   const Address field(r2, r1);
2854 
2855   // access field
2856   switch (bytecode()) {
2857   case Bytecodes::_fast_aputfield:
2858     do_oop_store(_masm, field, r0, _bs->kind(), false);
2859     break;
2860   case Bytecodes::_fast_lputfield:
2861     __ str(r0, field);
2862     break;
2863   case Bytecodes::_fast_iputfield:
2864     __ strw(r0, field);
2865     break;
2866   case Bytecodes::_fast_bputfield:
2867     __ strb(r0, field);
2868     break;
2869   case Bytecodes::_fast_sputfield:
2870     // fall through
2871   case Bytecodes::_fast_cputfield:
2872     __ strh(r0, field);
2873     break;
2874   case Bytecodes::_fast_fputfield:
2875     __ strs(v0, field);
2876     break;
2877   case Bytecodes::_fast_dputfield:
2878     __ strd(v0, field);
2879     break;
2880   default:
2881     ShouldNotReachHere();
2882   }
2883 
2884   {
2885     Label notVolatile;
2886     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2887     __ membar(MacroAssembler::StoreLoad);
2888     __ bind(notVolatile);
2889   }
2890 }
2891 
2892 
2893 void TemplateTable::fast_accessfield(TosState state)
2894 {
2895   transition(atos, state);
2896   // Do the JVMTI work here to avoid disturbing the register state below
2897   if (JvmtiExport::can_post_field_access()) {
2898     // Check to see if a field access watch has been set before we
2899     // take the time to call into the VM.
2900     Label L1;
2901     __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2902     __ ldrw(r2, Address(rscratch1));
2903     __ cbzw(r2, L1);
2904     // access constant pool cache entry
2905     __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1);
2906     __ verify_oop(r0);
2907     __ push_ptr(r0);  // save object pointer before call_VM() clobbers it
2908     __ mov(c_rarg1, r0);
2909     // c_rarg1: object pointer copied above
2910     // c_rarg2: cache entry pointer
2911     __ call_VM(noreg,
2912                CAST_FROM_FN_PTR(address,
2913                                 InterpreterRuntime::post_field_access),
2914                c_rarg1, c_rarg2);
2915     __ pop_ptr(r0); // restore object pointer
2916     __ bind(L1);
2917   }
2918 
2919   // access constant pool cache
2920   __ get_cache_and_index_at_bcp(r2, r1, 1);
2921   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
2922                                   ConstantPoolCacheEntry::f2_offset())));
2923   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
2924                                    ConstantPoolCacheEntry::flags_offset())));
2925 
2926   // r0: object
2927   __ verify_oop(r0);
2928   __ null_check(r0);
2929   const Address field(r0, r1);
2930 
2931   // access field
2932   switch (bytecode()) {
2933   case Bytecodes::_fast_agetfield:
2934     __ load_heap_oop(r0, field);
2935     __ verify_oop(r0);
2936     break;
2937   case Bytecodes::_fast_lgetfield:
2938     __ ldr(r0, field);
2939     break;
2940   case Bytecodes::_fast_igetfield:
2941     __ ldrw(r0, field);
2942     break;
2943   case Bytecodes::_fast_bgetfield:
2944     __ load_signed_byte(r0, field);
2945     break;
2946   case Bytecodes::_fast_sgetfield:
2947     __ load_signed_short(r0, field);
2948     break;
2949   case Bytecodes::_fast_cgetfield:
2950     __ load_unsigned_short(r0, field);
2951     break;
2952   case Bytecodes::_fast_fgetfield:
2953     __ ldrs(v0, field);
2954     break;
2955   case Bytecodes::_fast_dgetfield:
2956     __ ldrd(v0, field);
2957     break;
2958   default:
2959     ShouldNotReachHere();
2960   }
2961   {
2962     Label notVolatile;
2963     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2964     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2965     __ bind(notVolatile);
2966   }
2967 }
2968 
2969 void TemplateTable::fast_xaccess(TosState state)
2970 {
2971   transition(vtos, state);
2972 
2973   // get receiver
2974   __ ldr(r0, aaddress(0));
2975   // access constant pool cache
2976   __ get_cache_and_index_at_bcp(r2, r3, 2);
2977   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
2978                                   ConstantPoolCacheEntry::f2_offset())));
2979   // make sure exception is reported in correct bcp range (getfield is
2980   // next instruction)
2981   __ increment(rbcp);
2982   __ null_check(r0);
2983   switch (state) {
2984   case itos:
2985     __ ldr(r0, Address(r0, r1, Address::lsl(0)));
2986     break;
2987   case atos:
2988     __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0)));
2989     __ verify_oop(r0);
2990     break;
2991   case ftos:
2992     __ ldrs(v0, Address(r0, r1, Address::lsl(0)));
2993     break;
2994   default:
2995     ShouldNotReachHere();
2996   }
2997 
2998   {
2999     Label notVolatile;
3000     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
3001                                      ConstantPoolCacheEntry::flags_offset())));
3002     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3003     __ membar(MacroAssembler::LoadLoad);
3004     __ bind(notVolatile);
3005   }
3006 
3007   __ decrement(rbcp);
3008 }
3009 
3010 
3011 
3012 //-----------------------------------------------------------------------------
3013 // Calls
3014 
3015 void TemplateTable::count_calls(Register method, Register temp)
3016 {
3017   __ call_Unimplemented();
3018 }
3019 
3020 void TemplateTable::prepare_invoke(int byte_no,
3021                                    Register method, // linked method (or i-klass)
3022                                    Register index,  // itable index, MethodType, etc.
3023                                    Register recv,   // if caller wants to see it
3024                                    Register flags   // if caller wants to test it
3025                                    ) {
3026   // determine flags
3027   Bytecodes::Code code = bytecode();
3028   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3029   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3030   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3031   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3032   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3033   const bool load_receiver       = (recv  != noreg);
3034   const bool save_flags          = (flags != noreg);
3035   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3036   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3037   assert(flags == noreg || flags == r3, "");
3038   assert(recv  == noreg || recv  == r2, "");
3039 
3040   // setup registers & access constant pool cache
3041   if (recv  == noreg)  recv  = r2;
3042   if (flags == noreg)  flags = r3;
3043   assert_different_registers(method, index, recv, flags);
3044 
3045   // save 'interpreter return address'
3046   __ save_bcp();
3047 
3048   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3049 
3050   // maybe push appendix to arguments (just before return address)
3051   if (is_invokedynamic || is_invokehandle) {
3052     Label L_no_push;
3053     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3054     // Push the appendix as a trailing parameter.
3055     // This must be done before we get the receiver,
3056     // since the parameter_size includes it.
3057     __ push(r19);
3058     __ mov(r19, index);
3059     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3060     __ load_resolved_reference_at_index(index, r19);
3061     __ pop(r19);
3062     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3063     __ bind(L_no_push);
3064   }
3065 
3066   // load receiver if needed (note: no return address pushed yet)
3067   if (load_receiver) {
3068     __ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
3069     // FIXME -- is this actually correct? looks like it should be 2
3070     // const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3071     // const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3072     // Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3073     // __ movptr(recv, recv_addr);
3074     __ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
3075     __ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
3076     __ verify_oop(recv);
3077   }
3078 
3079   // compute return type
3080   // x86 uses a shift and mask or wings it with a shift plus assert
3081   // the mask is not needed. aarch64 just uses bitfield extract
3082   __ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
3083   // load return address
3084   {
3085     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3086     __ mov(rscratch1, table_addr);
3087     __ ldr(lr, Address(rscratch1, rscratch2, Address::lsl(3)));
3088   }
3089 }
3090 
3091 
3092 void TemplateTable::invokevirtual_helper(Register index,
3093                                          Register recv,
3094                                          Register flags)
3095 {
3096   // Uses temporary registers r0, r3
3097   assert_different_registers(index, recv, r0, r3);
3098   // Test for an invoke of a final method
3099   Label notFinal;
3100   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3101 
3102   const Register method = index;  // method must be rmethod
3103   assert(method == rmethod,
3104          "methodOop must be rmethod for interpreter calling convention");
3105 
3106   // do the call - the index is actually the method to call
3107   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3108 
3109   // It's final, need a null check here!
3110   __ null_check(recv);
3111 
3112   // profile this call
3113   __ profile_final_call(r0);
3114   __ profile_arguments_type(r0, method, r4, true);
3115 
3116   __ jump_from_interpreted(method, r0);
3117 
3118   __ bind(notFinal);
3119 
3120   // get receiver klass
3121   __ null_check(recv, oopDesc::klass_offset_in_bytes());
3122   __ load_klass(r0, recv);
3123 
3124   // profile this call
3125   __ profile_virtual_call(r0, rlocals, r3);
3126 
3127   // get target methodOop & entry point
3128   __ lookup_virtual_method(r0, index, method);
3129   __ profile_arguments_type(r3, method, r4, true);
3130   // FIXME -- this looks completely redundant. is it?
3131   // __ ldr(r3, Address(method, Method::interpreter_entry_offset()));
3132   __ jump_from_interpreted(method, r3);
3133 }
3134 
3135 void TemplateTable::invokevirtual(int byte_no)
3136 {
3137   transition(vtos, vtos);
3138   assert(byte_no == f2_byte, "use this argument");
3139 
3140   prepare_invoke(byte_no, rmethod, noreg, r2, r3);
3141 
3142   // rmethod: index (actually a Method*)
3143   // r2: receiver
3144   // r3: flags
3145 
3146   invokevirtual_helper(rmethod, r2, r3);
3147 }
3148 
3149 void TemplateTable::invokespecial(int byte_no)
3150 {
3151   transition(vtos, vtos);
3152   assert(byte_no == f1_byte, "use this argument");
3153 
3154   prepare_invoke(byte_no, rmethod, noreg,  // get f1 Method*
3155                  r2);  // get receiver also for null check
3156   __ verify_oop(r2);
3157   __ null_check(r2);
3158   // do the call
3159   __ profile_call(r0);
3160   __ profile_arguments_type(r0, rmethod, rbcp, false);
3161   __ jump_from_interpreted(rmethod, r0);
3162 }
3163 
3164 void TemplateTable::invokestatic(int byte_no)
3165 {
3166   transition(vtos, vtos);
3167   assert(byte_no == f1_byte, "use this argument");
3168 
3169   prepare_invoke(byte_no, rmethod);  // get f1 Method*
3170   // do the call
3171   __ profile_call(r0);
3172   __ profile_arguments_type(r0, rmethod, r4, false);
3173   __ jump_from_interpreted(rmethod, r0);
3174 }
3175 
3176 void TemplateTable::fast_invokevfinal(int byte_no)
3177 {
3178   __ call_Unimplemented();
3179 }
3180 
3181 void TemplateTable::invokeinterface(int byte_no) {
3182   transition(vtos, vtos);
3183   assert(byte_no == f1_byte, "use this argument");
3184 
3185   prepare_invoke(byte_no, r0, rmethod,  // get f1 Klass*, f2 itable index
3186                  r2, r3); // recv, flags
3187 
3188   // r0: interface klass (from f1)
3189   // rmethod: itable index (from f2)
3190   // r2: receiver
3191   // r3: flags
3192 
3193   // Special case of invokeinterface called for virtual method of
3194   // java.lang.Object.  See cpCacheOop.cpp for details.
3195   // This code isn't produced by javac, but could be produced by
3196   // another compliant java compiler.
3197   Label notMethod;
3198   __ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
3199 
3200   invokevirtual_helper(rmethod, r2, r3);
3201   __ bind(notMethod);
3202 
3203   // Get receiver klass into r3 - also a null check
3204   __ restore_locals();
3205   __ null_check(r2, oopDesc::klass_offset_in_bytes());
3206   __ load_klass(r3, r2);
3207 
3208   // profile this call
3209   __ profile_virtual_call(r3, r13, r19);
3210 
3211   Label no_such_interface, no_such_method;
3212 
3213   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3214                              r3, r0, rmethod,
3215                              // outputs: method, scan temp. reg
3216                              rmethod, r13,
3217                              no_such_interface);
3218 
3219   // rmethod,: methodOop to call
3220   // r2: receiver
3221   // Check for abstract method error
3222   // Note: This should be done more efficiently via a throw_abstract_method_error
3223   //       interpreter entry point and a conditional jump to it in case of a null
3224   //       method.
3225   __ cbz(rmethod, no_such_method);
3226 
3227   __ profile_arguments_type(r3, rmethod, r13, true);
3228 
3229   // do the call
3230   // r2: receiver
3231   // rmethod,: methodOop
3232   __ jump_from_interpreted(rmethod, r3);
3233   __ should_not_reach_here();
3234 
3235   // exception handling code follows...
3236   // note: must restore interpreter registers to canonical
3237   //       state for exception handling to work correctly!
3238 
3239   __ bind(no_such_method);
3240   // throw exception
3241   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3242   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3243   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3244   // the call_VM checks for exception, so we should never return here.
3245   __ should_not_reach_here();
3246 
3247   __ bind(no_such_interface);
3248   // throw exception
3249   __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
3250   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3251   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3252                    InterpreterRuntime::throw_IncompatibleClassChangeError));
3253   // the call_VM checks for exception, so we should never return here.
3254   __ should_not_reach_here();
3255   return;
3256 }
3257 
3258 void TemplateTable::invokehandle(int byte_no) {
3259   transition(vtos, vtos);
3260   assert(byte_no == f1_byte, "use this argument");
3261 
3262   prepare_invoke(byte_no, rmethod, r0, r2);
3263   __ verify_method_ptr(r2);
3264   __ verify_oop(r2);
3265   __ null_check(r2);
3266 
3267   // FIXME: profile the LambdaForm also
3268 
3269   // r13 is safe to use here as a scratch reg because it is about to
3270   // be clobbered by jump_from_interpreted().
3271   __ profile_final_call(r13);
3272   __ profile_arguments_type(r13, rmethod, r4, true);
3273 
3274   __ jump_from_interpreted(rmethod, r0);
3275 }
3276 
3277 void TemplateTable::invokedynamic(int byte_no) {
3278   transition(vtos, vtos);
3279   assert(byte_no == f1_byte, "use this argument");
3280 
3281   prepare_invoke(byte_no, rmethod, r0);
3282 
3283   // r0: CallSite object (from cpool->resolved_references[])
3284   // rmethod: MH.linkToCallSite method (from f2)
3285 
3286   // Note:  r0_callsite is already pushed by prepare_invoke
3287 
3288   // %%% should make a type profile for any invokedynamic that takes a ref argument
3289   // profile this call
3290   __ profile_call(rbcp);
3291   __ profile_arguments_type(r3, rmethod, r13, false);
3292 
3293   __ verify_oop(r0);
3294 
3295   __ jump_from_interpreted(rmethod, r0);
3296 }
3297 
3298 
3299 //-----------------------------------------------------------------------------
3300 // Allocation
3301 
3302 void TemplateTable::_new() {
3303   transition(vtos, atos);
3304 
3305   __ get_unsigned_2_byte_index_at_bcp(r3, 1);
3306   Label slow_case;
3307   Label done;
3308   Label initialize_header;
3309   Label initialize_object; // including clearing the fields
3310   Label allocate_shared;
3311 
3312   __ get_cpool_and_tags(r4, r0);
3313   // Make sure the class we're about to instantiate has been resolved.
3314   // This is done before loading InstanceKlass to be consistent with the order
3315   // how Constant Pool is updated (see ConstantPool::klass_at_put)
3316   const int tags_offset = Array<u1>::base_offset_in_bytes();
3317   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
3318   __ lea(rscratch1, Address(rscratch1, tags_offset));
3319   __ ldarb(rscratch1, rscratch1);
3320   __ cmp(rscratch1, JVM_CONSTANT_Class);
3321   __ br(Assembler::NE, slow_case);
3322 
3323   // get InstanceKlass
3324   __ lea(r4, Address(r4, r3, Address::lsl(3)));
3325   __ ldr(r4, Address(r4, sizeof(ConstantPool)));
3326 
3327   // make sure klass is initialized & doesn't have finalizer
3328   // make sure klass is fully initialized
3329   __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
3330   __ cmp(rscratch1, InstanceKlass::fully_initialized);
3331   __ br(Assembler::NE, slow_case);
3332 
3333   // get instance_size in InstanceKlass (scaled to a count of bytes)
3334   __ ldrw(r3,
3335           Address(r4,
3336                   Klass::layout_helper_offset()));
3337   // test to see if it has a finalizer or is malformed in some way
3338   __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3339 
3340   // Allocate the instance
3341   // 1) Try to allocate in the TLAB
3342   // 2) if fail and the object is large allocate in the shared Eden
3343   // 3) if the above fails (or is not applicable), go to a slow case
3344   // (creates a new TLAB, etc.)
3345 
3346   const bool allow_shared_alloc =
3347     Universe::heap()->supports_inline_contig_alloc();
3348 
3349   if (UseTLAB) {
3350     __ tlab_allocate(r0, r3, 0, noreg, r1,
3351                      allow_shared_alloc ? allocate_shared : slow_case);
3352 
3353     if (ZeroTLAB) {
3354       // the fields have been already cleared
3355       __ b(initialize_header);
3356     } else {
3357       // initialize both the header and fields
3358       __ b(initialize_object);
3359     }
3360   }
3361 
3362   // Allocation in the shared Eden, if allowed.
3363   //
3364   // r3: instance size in bytes
3365   if (allow_shared_alloc) {
3366     __ bind(allocate_shared);
3367 
3368     __ eden_allocate(r0, r3, 0, r10, slow_case);
3369     __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
3370   }
3371 
3372   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3373     // The object is initialized before the header.  If the object size is
3374     // zero, go directly to the header initialization.
3375     __ bind(initialize_object);
3376     __ sub(r3, r3, sizeof(oopDesc));
3377     __ cbz(r3, initialize_header);
3378 
3379     // Initialize object fields
3380     {
3381       __ add(r2, r0, sizeof(oopDesc));
3382       Label loop;
3383       __ bind(loop);
3384       __ str(zr, Address(__ post(r2, BytesPerLong)));
3385       __ sub(r3, r3, BytesPerLong);
3386       __ cbnz(r3, loop);
3387     }
3388 
3389     // initialize object header only.
3390     __ bind(initialize_header);
3391     if (UseBiasedLocking) {
3392       __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
3393     } else {
3394       __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
3395     }
3396     __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
3397     __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
3398     __ store_klass(r0, r4);      // store klass last
3399 
3400     {
3401       SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3402       // Trigger dtrace event for fastpath
3403       __ push(atos); // save the return value
3404       __ call_VM_leaf(
3405            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
3406       __ pop(atos); // restore the return value
3407 
3408     }
3409     __ b(done);
3410   }
3411 
3412   // slow case
3413   __ bind(slow_case);
3414   __ get_constant_pool(c_rarg1);
3415   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3416   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3417   __ verify_oop(r0);
3418 
3419   // continue
3420   __ bind(done);
3421   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3422   __ membar(Assembler::StoreStore);
3423 }
3424 
3425 void TemplateTable::newarray() {
3426   transition(itos, atos);
3427   __ load_unsigned_byte(c_rarg1, at_bcp(1));
3428   __ mov(c_rarg2, r0);
3429   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3430           c_rarg1, c_rarg2);
3431   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3432   __ membar(Assembler::StoreStore);
3433 }
3434 
3435 void TemplateTable::anewarray() {
3436   transition(itos, atos);
3437   __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3438   __ get_constant_pool(c_rarg1);
3439   __ mov(c_rarg3, r0);
3440   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3441           c_rarg1, c_rarg2, c_rarg3);
3442   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3443   __ membar(Assembler::StoreStore);
3444 }
3445 
3446 void TemplateTable::arraylength() {
3447   transition(atos, itos);
3448   __ null_check(r0, arrayOopDesc::length_offset_in_bytes());
3449   __ ldrw(r0, Address(r0, arrayOopDesc::length_offset_in_bytes()));
3450 }
3451 
3452 void TemplateTable::checkcast()
3453 {
3454   transition(atos, atos);
3455   Label done, is_null, ok_is_subtype, quicked, resolved;
3456   __ cbz(r0, is_null);
3457 
3458   // Get cpool & tags index
3459   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3460   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3461   // See if bytecode has already been quicked
3462   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3463   __ lea(r1, Address(rscratch1, r19));
3464   __ ldarb(r1, r1);
3465   __ cmp(r1, JVM_CONSTANT_Class);
3466   __ br(Assembler::EQ, quicked);
3467 
3468   __ push(atos); // save receiver for result, and for GC
3469   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3470   // vm_result_2 has metadata result
3471   __ get_vm_result_2(r0, rthread);
3472   __ pop(r3); // restore receiver
3473   __ b(resolved);
3474 
3475   // Get superklass in r0 and subklass in r3
3476   __ bind(quicked);
3477   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
3478   __ lea(r0, Address(r2, r19, Address::lsl(3)));
3479   __ ldr(r0, Address(r0, sizeof(ConstantPool)));
3480 
3481   __ bind(resolved);
3482   __ load_klass(r19, r3);
3483 
3484   // Generate subtype check.  Blows r2, r5.  Object in r3.
3485   // Superklass in r0.  Subklass in r19.
3486   __ gen_subtype_check(r19, ok_is_subtype);
3487 
3488   // Come here on failure
3489   __ push(r3);
3490   // object is at TOS
3491   __ b(Interpreter::_throw_ClassCastException_entry);
3492 
3493   // Come here on success
3494   __ bind(ok_is_subtype);
3495   __ mov(r0, r3); // Restore object in r3
3496 
3497   // Collect counts on whether this test sees NULLs a lot or not.
3498   if (ProfileInterpreter) {
3499     __ b(done);
3500     __ bind(is_null);
3501     __ profile_null_seen(r2);
3502   } else {
3503     __ bind(is_null);   // same as 'done'
3504   }
3505   __ bind(done);
3506 }
3507 
3508 void TemplateTable::instanceof() {
3509   transition(atos, itos);
3510   Label done, is_null, ok_is_subtype, quicked, resolved;
3511   __ cbz(r0, is_null);
3512 
3513   // Get cpool & tags index
3514   __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
3515   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
3516   // See if bytecode has already been quicked
3517   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
3518   __ lea(r1, Address(rscratch1, r19));
3519   __ ldarb(r1, r1);
3520   __ cmp(r1, JVM_CONSTANT_Class);
3521   __ br(Assembler::EQ, quicked);
3522 
3523   __ push(atos); // save receiver for result, and for GC
3524   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3525   // vm_result_2 has metadata result
3526   __ get_vm_result_2(r0, rthread);
3527   __ pop(r3); // restore receiver
3528   __ verify_oop(r3);
3529   __ load_klass(r3, r3);
3530   __ b(resolved);
3531 
3532   // Get superklass in r0 and subklass in r3
3533   __ bind(quicked);
3534   __ load_klass(r3, r0);
3535   __ lea(r0, Address(r2, r19, Address::lsl(3)));
3536   __ ldr(r0, Address(r0, sizeof(ConstantPool)));
3537 
3538   __ bind(resolved);
3539 
3540   // Generate subtype check.  Blows r2, r5
3541   // Superklass in r0.  Subklass in r3.
3542   __ gen_subtype_check(r3, ok_is_subtype);
3543 
3544   // Come here on failure
3545   __ mov(r0, 0);
3546   __ b(done);
3547   // Come here on success
3548   __ bind(ok_is_subtype);
3549   __ mov(r0, 1);
3550 
3551   // Collect counts on whether this test sees NULLs a lot or not.
3552   if (ProfileInterpreter) {
3553     __ b(done);
3554     __ bind(is_null);
3555     __ profile_null_seen(r2);
3556   } else {
3557     __ bind(is_null);   // same as 'done'
3558   }
3559   __ bind(done);
3560   // r0 = 0: obj == NULL or  obj is not an instanceof the specified klass
3561   // r0 = 1: obj != NULL and obj is     an instanceof the specified klass
3562 }
3563 
3564 //-----------------------------------------------------------------------------
3565 // Breakpoints
3566 void TemplateTable::_breakpoint() {
3567   // Note: We get here even if we are single stepping..
3568   // jbug inists on setting breakpoints at every bytecode
3569   // even if we are in single step mode.
3570 
3571   transition(vtos, vtos);
3572 
3573   // get the unpatched byte code
3574   __ get_method(c_rarg1);
3575   __ call_VM(noreg,
3576              CAST_FROM_FN_PTR(address,
3577                               InterpreterRuntime::get_original_bytecode_at),
3578              c_rarg1, rbcp);
3579   __ mov(r19, r0);
3580 
3581   // post the breakpoint event
3582   __ call_VM(noreg,
3583              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3584              rmethod, rbcp);
3585 
3586   // complete the execution of original bytecode
3587   __ mov(rscratch1, r19);
3588   __ dispatch_only_normal(vtos);
3589 }
3590 
3591 //-----------------------------------------------------------------------------
3592 // Exceptions
3593 
3594 void TemplateTable::athrow() {
3595   transition(atos, vtos);
3596   __ null_check(r0);
3597   __ b(Interpreter::throw_exception_entry());
3598 }
3599 
3600 //-----------------------------------------------------------------------------
3601 // Synchronization
3602 //
3603 // Note: monitorenter & exit are symmetric routines; which is reflected
3604 //       in the assembly code structure as well
3605 //
3606 // Stack layout:
3607 //
3608 // [expressions  ] <--- esp               = expression stack top
3609 // ..
3610 // [expressions  ]
3611 // [monitor entry] <--- monitor block top = expression stack bot
3612 // ..
3613 // [monitor entry]
3614 // [frame data   ] <--- monitor block bot
3615 // ...
3616 // [saved rbp    ] <--- rbp
3617 void TemplateTable::monitorenter()
3618 {
3619   transition(atos, vtos);
3620 
3621   // check for NULL object
3622   __ null_check(r0);
3623 
3624   const Address monitor_block_top(
3625         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3626   const Address monitor_block_bot(
3627         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3628   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3629 
3630   Label allocated;
3631 
3632   // initialize entry pointer
3633   __ mov(c_rarg1, zr); // points to free slot or NULL
3634 
3635   // find a free slot in the monitor block (result in c_rarg1)
3636   {
3637     Label entry, loop, exit;
3638     __ ldr(c_rarg3, monitor_block_top); // points to current entry,
3639                                         // starting with top-most entry
3640     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3641 
3642     __ b(entry);
3643 
3644     __ bind(loop);
3645     // check if current entry is used
3646     // if not used then remember entry in c_rarg1
3647     __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3648     __ cmp(zr, rscratch1);
3649     __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
3650     // check if current entry is for same object
3651     __ cmp(r0, rscratch1);
3652     // if same object then stop searching
3653     __ br(Assembler::EQ, exit);
3654     // otherwise advance to next entry
3655     __ add(c_rarg3, c_rarg3, entry_size);
3656     __ bind(entry);
3657     // check if bottom reached
3658     __ cmp(c_rarg3, c_rarg2);
3659     // if not at bottom then check this entry
3660     __ br(Assembler::NE, loop);
3661     __ bind(exit);
3662   }
3663 
3664   __ cbnz(c_rarg1, allocated); // check if a slot has been found and
3665                             // if found, continue with that on
3666 
3667   // allocate one if there's no free slot
3668   {
3669     Label entry, loop, no_adjust;
3670     // 1. compute new pointers            // rsp: old expression stack top
3671     __ ldr(c_rarg1, monitor_block_bot);   // c_rarg1: old expression stack bottom
3672     __ sub(esp, esp, entry_size);           // move expression stack top
3673     __ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
3674     __ mov(c_rarg3, esp);                 // set start value for copy loop
3675     __ str(c_rarg1, monitor_block_bot);   // set new monitor block bottom
3676 
3677     __ cmp(sp, c_rarg3);                  // Check if we need to move sp
3678     __ br(Assembler::LO, no_adjust);      // to allow more stack space
3679                                           // for our new esp
3680     __ sub(sp, sp, 2 * wordSize);
3681     __ bind(no_adjust);
3682 
3683     __ b(entry);
3684     // 2. move expression stack contents
3685     __ bind(loop);
3686     __ ldr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3687                                                    // word from old location
3688     __ str(c_rarg2, Address(c_rarg3, 0));          // and store it at new location
3689     __ add(c_rarg3, c_rarg3, wordSize);            // advance to next word
3690     __ bind(entry);
3691     __ cmp(c_rarg3, c_rarg1);        // check if bottom reached
3692     __ br(Assembler::NE, loop);      // if not at bottom then
3693                                      // copy next word
3694   }
3695 
3696   // call run-time routine
3697   // c_rarg1: points to monitor entry
3698   __ bind(allocated);
3699 
3700   // Increment bcp to point to the next bytecode, so exception
3701   // handling for async. exceptions work correctly.
3702   // The object has already been poped from the stack, so the
3703   // expression stack looks correct.
3704   __ increment(rbcp);
3705 
3706   // store object
3707   __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3708   __ lock_object(c_rarg1);
3709 
3710   // check to make sure this monitor doesn't cause stack overflow after locking
3711   __ save_bcp();  // in case of exception
3712   __ generate_stack_overflow_check(0);
3713 
3714   // The bcp has already been incremented. Just need to dispatch to
3715   // next instruction.
3716   __ dispatch_next(vtos);
3717 }
3718 
3719 
3720 void TemplateTable::monitorexit()
3721 {
3722   transition(atos, vtos);
3723 
3724   // check for NULL object
3725   __ null_check(r0);
3726 
3727   const Address monitor_block_top(
3728         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3729   const Address monitor_block_bot(
3730         rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
3731   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3732 
3733   Label found;
3734 
3735   // find matching slot
3736   {
3737     Label entry, loop;
3738     __ ldr(c_rarg1, monitor_block_top); // points to current entry,
3739                                         // starting with top-most entry
3740     __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3741                                         // of monitor block
3742     __ b(entry);
3743 
3744     __ bind(loop);
3745     // check if current entry is for same object
3746     __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3747     __ cmp(r0, rscratch1);
3748     // if same object then stop searching
3749     __ br(Assembler::EQ, found);
3750     // otherwise advance to next entry
3751     __ add(c_rarg1, c_rarg1, entry_size);
3752     __ bind(entry);
3753     // check if bottom reached
3754     __ cmp(c_rarg1, c_rarg2);
3755     // if not at bottom then check this entry
3756     __ br(Assembler::NE, loop);
3757   }
3758 
3759   // error handling. Unlocking was not block-structured
3760   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3761                    InterpreterRuntime::throw_illegal_monitor_state_exception));
3762   __ should_not_reach_here();
3763 
3764   // call run-time routine
3765   __ bind(found);
3766   __ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
3767   __ unlock_object(c_rarg1);
3768   __ pop_ptr(r0); // discard object
3769 }
3770 
3771 
3772 // Wide instructions
3773 void TemplateTable::wide()
3774 {
3775   __ load_unsigned_byte(r19, at_bcp(1));
3776   __ mov(rscratch1, (address)Interpreter::_wentry_point);
3777   __ ldr(rscratch1, Address(rscratch1, r19, Address::uxtw(3)));
3778   __ br(rscratch1);
3779 }
3780 
3781 
3782 // Multi arrays
3783 void TemplateTable::multianewarray() {
3784   transition(vtos, atos);
3785   __ load_unsigned_byte(r0, at_bcp(3)); // get number of dimensions
3786   // last dim is on top of stack; we want address of first one:
3787   // first_addr = last_addr + (ndims - 1) * wordSize
3788   __ lea(c_rarg1, Address(esp, r0, Address::uxtw(3)));
3789   __ sub(c_rarg1, c_rarg1, wordSize);
3790   call_VM(r0,
3791           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3792           c_rarg1);
3793   __ load_unsigned_byte(r1, at_bcp(3));
3794   __ lea(esp, Address(esp, r1, Address::uxtw(3)));
3795 }